diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index fb25e1ad96..93dfe4b1e8 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -224,6 +224,7 @@ jobs: RUST_BACKTRACE: 1 USE_DOCKER: "true" SHORT_PREFIX: 1 + CITREA_E2E_TEST_BINARY: ${{ github.workspace }}/target/debug/citrea - name: Upload coverage uses: codecov/codecov-action@v4 with: @@ -395,6 +396,7 @@ jobs: BONSAI_API_KEY: ${{ secrets.BONSAI_API_KEY }} # TODO: remove this once we don't use the client on tests USE_DOCKER: "true" SHORT_PREFIX: 1 + CITREA_E2E_TEST_BINARY: ${{ github.workspace }}/target/debug/citrea system-contracts: strategy: diff --git a/.github/workflows/nightly_build_push.yml b/.github/workflows/nightly_build_push.yml new file mode 100644 index 0000000000..e053ba5c60 --- /dev/null +++ b/.github/workflows/nightly_build_push.yml @@ -0,0 +1,182 @@ +name: nightly-build-and-push + +on: + push: + branches: + - nightly + +env: + EXPECTED_BITCOIN_DA_ID: ${{ vars.EXPECTED_BITCOIN_DA_ID }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + IMAGE_TAG: ${{ github.sha }} + +jobs: + + validate_DA_ID_format: + runs-on: ubuntu-latest + steps: + - name: Validate EXPECTED_BITCOIN_DA_ID format + run: | + echo "Raw EXPECTED_BITCOIN_DA_ID value:" + echo "$EXPECTED_BITCOIN_DA_ID" + + echo "Length of EXPECTED_BITCOIN_DA_ID: ${#EXPECTED_BITCOIN_DA_ID}" + + if [ -z "${EXPECTED_BITCOIN_DA_ID// }" ]; then + echo "Error: EXPECTED_BITCOIN_DA_ID is not set, empty, or contains only spaces" + exit 1 + fi + + # Remove any trailing newline or carriage return + EXPECTED_BITCOIN_DA_ID=$(echo "$EXPECTED_BITCOIN_DA_ID" | tr -d '\n\r') + + # Count commas and spaces + comma_count=$(echo "$EXPECTED_BITCOIN_DA_ID" | tr -cd ',' | wc -c) + space_count=$(echo "$EXPECTED_BITCOIN_DA_ID" | tr -cd ' ' | wc -c) + + echo "Number of commas: $comma_count" + echo "Number of spaces: $space_count" + + # Split the string into an array and trim each element + IFS=', ' read -ra raw_numbers <<< "$EXPECTED_BITCOIN_DA_ID" + numbers=() + for num in "${raw_numbers[@]}"; do + trimmed_num=$(echo "$num" | tr -d '[:space:]') # Remove all whitespace + numbers+=("$trimmed_num") + done + + echo "Number of elements after splitting and trimming: ${#numbers[@]}" + + # Check if there are exactly 8 numbers + if [ ${#numbers[@]} -ne 8 ]; then + echo "Error: EXPECTED_BITCOIN_DA_ID should contain exactly 8 numbers" + echo "Actual number of elements: ${#numbers[@]}" + exit 1 + fi + + # Check if all numbers are valid u32 + for i in "${!numbers[@]}"; do + num=${numbers[$i]} + echo "Checking number $((i+1)): '$num'" + echo "Hex representation: $(echo -n "$num" | xxd -p)" + if ! 
[[ $num =~ ^[0-9]+$ ]]; then + echo "Error: '$num' is not composed of digits only" + exit 1 + fi + if [ $num -gt 4294967295 ]; then + echo "Error: '$num' is greater than 4294967295" + exit 1 + fi + done + + # Reconstruct the trimmed DA_ID + trimmed_da_id=$(IFS=', '; echo "${numbers[*]}") + + # Final check + if [ $comma_count -eq 7 ] && [ $space_count -eq 7 ] && [ ${#numbers[@]} -eq 8 ]; then + echo "EXPECTED_BITCOIN_DA_ID is valid:" + echo "- Contains 7 commas" + echo "- Contains 7 spaces" + echo "- Contains 8 valid u32 numbers" + echo "Original value: $EXPECTED_BITCOIN_DA_ID" + echo "Trimmed value: $trimmed_da_id" + else + echo "Error: EXPECTED_BITCOIN_DA_ID format is incorrect" + echo "- Comma count: $comma_count (should be 7)" + echo "- Space count: $space_count (should be 7)" + echo "- Number count: ${#numbers[@]} (should be 8)" + exit 1 + fi + + linux_amd64_binary_extraction: + needs: validate_DA_ID_format + runs-on: ubicloud-standard-30 + strategy: + matrix: + include: + - short_prefix: 1 + short_prefix_value: "-short-prefix" + - short_prefix: 0 + short_prefix_value: "" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Dependencies + run: | + sudo apt update && sudo apt -y install curl gcc cpp cmake clang llvm + sudo apt -y autoremove && sudo apt clean && sudo rm -rf /var/lib/apt/lists/* + + - name: Install Rust + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + rustup install 1.79.0 + rustup default 1.79.0 + + - name: Install Cargo Binstall + run: | + cargo install --version 1.7.0 cargo-binstall + + - name: Install cargo-risczero + run: | + cargo binstall cargo-risczero@1.0.5 --no-confirm + + - name: Install risc0-zkvm toolchain + run: cargo risczero install --version r0.1.79.0-2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Build Project + env: + REPR_GUEST_BUILD: 1 + SHORT_PREFIX: ${{ matrix.short_prefix }} + SKIP_GUEST_BUILD: 0 + run: | + cargo build --release + + - name: Check BITCOIN_DA_ID + id: check-id + run: | + RESULT=$(grep -R "BITCOIN_DA_ID" target/ || echo "Grep failed") + EXPECTED_BITCOIN_DA_ID=$(echo "${{ env.EXPECTED_BITCOIN_DA_ID }}" | tr -d '\n\r') + if echo "$RESULT" | grep -q "$EXPECTED_BITCOIN_DA_ID"; then + echo "Check passed successfully." + echo "Expected: BITCOIN_DA_ID ${{ env.EXPECTED_BITCOIN_DA_ID }} " + echo "Actual: $RESULT" + + else + echo "Check failed. 
Expected: BITCOIN_DA_ID ${{ env.EXPECTED_BITCOIN_DA_ID }} " + echo "Actual: $RESULT" + exit 1 + fi + + - name: Copy binary to build-push/nightly + run: | + cp target/release/citrea build-push/nightly/citrea + chmod +x build-push/nightly/citrea + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build Docker image + uses: docker/build-push-action@v6 + with: + file: ./build-push/nightly/Dockerfile + context: ./build-push/nightly + tags: ${{ vars.DOCKERHUB_USERNAME }}/citrea:${{ env.IMAGE_TAG }}${{ matrix.short_prefix_value }} + platforms: linux/amd64 + push: true + load: false + provenance: false + + + diff --git a/Cargo.lock b/Cargo.lock index 5dbbb70e94..7c8a63e485 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1222,10 +1222,11 @@ dependencies = [ "hex", "pin-project", "rand 0.8.5", + "reqwest 0.12.5", "serde", "serde_json", "sha2", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "thiserror", "tokio", "tracing", @@ -1270,7 +1271,7 @@ dependencies = [ [[package]] name = "bitcoincore-rpc" version = "0.18.0" -source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=0ae498d#0ae498da2b6aa6e89b1a1bc9d484eb55919718ba" +source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=ede8097#ede8097e0f0fce5d69e9cab6f85fda39e788a97b" dependencies = [ "async-trait", "bitcoincore-rpc-json", @@ -1285,7 +1286,7 @@ dependencies = [ [[package]] name = "bitcoincore-rpc-json" version = "0.18.0" -source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=0ae498d#0ae498da2b6aa6e89b1a1bc9d484eb55919718ba" +source = "git+https://github.com/chainwayxyz/rust-bitcoincore-rpc.git?rev=ede8097#ede8097e0f0fce5d69e9cab6f85fda39e788a97b" dependencies = [ "bitcoin", "serde", @@ -1693,9 +1694,9 @@ dependencies = [ "bitcoin", "bitcoin-da", "bitcoincore-rpc", - "bollard", "borsh", "citrea-common", + "citrea-e2e", "citrea-evm", "citrea-fullnode", "citrea-primitives", @@ -1705,13 +1706,11 @@ dependencies = [ "citrea-stf", "clap", "ethereum-rpc", - "futures", "hex", "jsonrpsee", "log", "log-panics", "proptest", - "rand 0.8.5", "regex", "reqwest 0.12.5", "reth-primitives", @@ -1728,18 +1727,17 @@ dependencies = [ "sha2", "soft-confirmation-rule-enforcer", "sov-db", - "sov-ledger-rpc", + "sov-ledger-rpc 0.5.0-rc.1", "sov-mock-da", "sov-modules-api", "sov-modules-rollup-blueprint", "sov-modules-stf-blueprint", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tempfile", "tokio", - "toml", "tracing", "tracing-subscriber 0.3.18", ] @@ -1750,19 +1748,51 @@ version = "0.5.0-rc.1" dependencies = [ "anyhow", "backoff", + "borsh", "citrea-primitives", + "citrea-pruning", "futures", + "hex", "hyper 1.4.1", "jsonrpsee", "lru", + "serde", "sov-db", - "sov-rollup-interface", + "sov-mock-da", + "sov-rollup-interface 0.5.0-rc.1", + "sov-stf-runner", + "tempfile", "tokio", "tokio-util", + "toml", "tower-http", "tracing", ] +[[package]] +name = "citrea-e2e" +version = "0.1.0" +source = "git+https://github.com/chainwayxyz/citrea-e2e?rev=a96abcf#a96abcfe145b9d2467e2f6a5b996cf458bb2d079" +dependencies = [ + "anyhow", + "async-trait", + "bitcoin", + "bitcoincore-rpc", + "bollard", + "futures", + "hex", + "jsonrpsee", + "rand 0.8.5", + "serde", + "serde_json", + "sov-ledger-rpc 
0.5.0-rc.1 (git+https://github.com/chainwayxyz/citrea?rev=82bf52d)", + "sov-rollup-interface 0.5.0-rc.1 (git+https://github.com/chainwayxyz/citrea?rev=82bf52d)", + "tempfile", + "tokio", + "toml", + "tracing", +] + [[package]] name = "citrea-evm" version = "0.5.0-rc.1" @@ -1803,7 +1833,7 @@ dependencies = [ "serde_json", "sov-modules-api", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tempfile", @@ -1839,7 +1869,7 @@ dependencies = [ "sov-modules-rollup-blueprint", "sov-modules-stf-blueprint", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tempfile", @@ -1857,7 +1887,7 @@ dependencies = [ "anyhow", "reth-primitives", "serde", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tokio", "tracing", ] @@ -1889,9 +1919,10 @@ dependencies = [ "sov-mock-da", "sov-mock-zkvm", "sov-modules-api", + "sov-modules-core", "sov-modules-rollup-blueprint", "sov-modules-stf-blueprint", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-stf-runner", "tempfile", "tokio", @@ -1937,7 +1968,7 @@ dependencies = [ "serde", "sov-db", "sov-risc0-adapter", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tracing", ] @@ -1983,7 +2014,7 @@ dependencies = [ "sov-modules-api", "sov-modules-rollup-blueprint", "sov-modules-stf-blueprint", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tempfile", @@ -2012,7 +2043,7 @@ dependencies = [ "sov-accounts", "sov-modules-api", "sov-modules-stf-blueprint", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tracing", @@ -2421,7 +2452,7 @@ dependencies = [ "sha2", "sov-mock-da", "sov-mock-zkvm", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", ] [[package]] @@ -2445,14 +2476,13 @@ dependencies = [ "sov-modules-api", "sov-modules-stf-blueprint", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-sequencer-registry", "sov-state", "sov-stf-runner", "sov-value-setter", "tempfile", "tokio", - "toml", "tracing", ] @@ -2794,7 +2824,7 @@ dependencies = [ "serde_json", "sov-db", "sov-modules-api", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tokio", "tracing", ] @@ -3770,7 +3800,7 @@ dependencies = [ "serde", "sov-modules-api", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-schema-db", "sov-state", "tempfile", @@ -7045,7 +7075,7 @@ dependencies = [ "reth-rpc-types", "serde", "serde_json", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tokio", "tracing", ] @@ -7318,7 +7348,7 @@ dependencies = [ "sov-mock-da", "sov-modules-api", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "tempfile", "tracing", @@ -7445,7 +7475,7 @@ dependencies = [ "rocksdb", "serde", "sha2", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-schema-db", "tempfile", "tokio", @@ -7463,11 +7493,21 @@ dependencies = [ "serde_json", "sov-db", "sov-modules-api", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tempfile", "tokio", ] +[[package]] +name = "sov-ledger-rpc" +version = "0.5.0-rc.1" +source = "git+https://github.com/chainwayxyz/citrea?rev=82bf52d#82bf52d067c1886d92c55c141c8fc062b1931164" +dependencies = [ + "jsonrpsee", + "serde", + "sov-rollup-interface 0.5.0-rc.1 
(git+https://github.com/chainwayxyz/citrea?rev=82bf52d)", +] + [[package]] name = "sov-mock-da" version = "0.5.0-rc.1" @@ -7484,7 +7524,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tempfile", "tokio", "tokio-stream", @@ -7500,7 +7540,7 @@ dependencies = [ "bincode", "borsh", "serde", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", ] [[package]] @@ -7545,7 +7585,7 @@ dependencies = [ "sov-modules-core", "sov-modules-macros", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "tempfile", "thiserror", @@ -7573,7 +7613,7 @@ dependencies = [ "sov-modules-api", "sov-modules-core", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "tempfile", "thiserror", @@ -7593,7 +7633,7 @@ dependencies = [ "serde_json", "sov-modules-api", "sov-modules-core", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "syn 1.0.109", "trybuild", @@ -7606,16 +7646,17 @@ dependencies = [ "anyhow", "async-trait", "borsh", + "citrea-common", "hex", "jsonrpsee", "serde", "serde_json", "sov-cli", "sov-db", - "sov-ledger-rpc", + "sov-ledger-rpc 0.5.0-rc.1", "sov-modules-api", "sov-modules-stf-blueprint", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-stf-runner", "tokio", @@ -7637,7 +7678,7 @@ dependencies = [ "rs_merkle", "serde", "sov-modules-api", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "sov-zk-cycle-macros", "sov-zk-cycle-utils", @@ -7674,7 +7715,7 @@ dependencies = [ "sha2", "sov-db", "sov-mock-da", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-schema-db", "sov-state", "tempfile", @@ -7696,7 +7737,7 @@ dependencies = [ "risc0-zkvm", "risc0-zkvm-platform", "serde", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-zk-cycle-utils", ] @@ -7714,7 +7755,7 @@ dependencies = [ "sov-bank", "sov-mock-da", "sov-modules-api", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "tokio", ] @@ -7740,6 +7781,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "sov-rollup-interface" +version = "0.5.0-rc.1" +source = "git+https://github.com/chainwayxyz/citrea?rev=82bf52d#82bf52d067c1886d92c55c141c8fc062b1931164" +dependencies = [ + "anyhow", + "async-trait", + "borsh", + "bytes", + "digest 0.10.7", + "futures", + "hex", + "proptest", + "serde", + "sha2", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "sov-schema-db" version = "0.5.0-rc.1" @@ -7804,7 +7865,7 @@ dependencies = [ "sha2", "sov-db", "sov-modules-core", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-zk-cycle-macros", "tempfile", "thiserror", @@ -7817,7 +7878,6 @@ dependencies = [ "anyhow", "async-trait", "borsh", - "citrea-pruning", "futures", "hex", "hyper 1.4.1", @@ -7827,15 +7887,13 @@ dependencies = [ "serde_json", "sha2", "sov-db", - "sov-mock-da", "sov-modules-api", "sov-prover-storage-manager", - "sov-rollup-interface", + "sov-rollup-interface 0.5.0-rc.1", "sov-state", "tempfile", "thiserror", "tokio", - "toml", "tower", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index 2026e3dbda..b3cb98aae1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -167,8 +167,6 @@ tower-http = { version = "0.5.0", features = ["full"] } tower = { version = "0.4.13", features = ["full"] } hyper = { version = "1.4.0" } -bollard = { version = "0.17.1" } - [patch.'https://github.com/eigerco/celestia-node-rs.git'] # 
Uncomment to apply local changes # celestia-proto = { path = "../celestia-node-rs/proto" } @@ -188,4 +186,4 @@ ed25519-dalek = { git = "https://github.com/risc0/curve25519-dalek", tag = "curv crypto-bigint = { git = "https://github.com/risc0/RustCrypto-crypto-bigint", tag = "v0.5.5-risczero.0" } secp256k1 = { git = "https://github.com/Sovereign-Labs/rust-secp256k1.git", branch = "risc0-compatible-0-29-0" } k256 = { git = "https://github.com/risc0/RustCrypto-elliptic-curves", tag = "k256/v0.13.3-risczero.0" } -bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "0ae498d" } +bitcoincore-rpc = { version = "0.18.0", git = "https://github.com/chainwayxyz/rust-bitcoincore-rpc.git", rev = "ede8097" } diff --git a/bin/citrea/Cargo.toml b/bin/citrea/Cargo.toml index a98926519b..7959bcd4a7 100644 --- a/bin/citrea/Cargo.toml +++ b/bin/citrea/Cargo.toml @@ -86,10 +86,7 @@ rustc_version_runtime = { workspace = true } # bitcoin-e2e dependencies bitcoin.workspace = true bitcoincore-rpc.workspace = true -bollard.workspace = true -futures.workspace = true -rand.workspace = true -toml.workspace = true +citrea-e2e = { git = "https://github.com/chainwayxyz/citrea-e2e", rev = "a96abcf" } [features] default = [] # Deviate from convention by making the "native" feature active by default. This aligns with how this package is meant to be used (as a binary first, library second). diff --git a/bin/citrea/src/main.rs b/bin/citrea/src/main.rs index 886b239d64..3e6d20a091 100644 --- a/bin/citrea/src/main.rs +++ b/bin/citrea/src/main.rs @@ -3,14 +3,13 @@ use core::fmt::Debug as DebugTrait; use anyhow::Context as _; use bitcoin_da::service::BitcoinServiceConfig; use citrea::{initialize_logging, BitcoinRollup, CitreaRollupBlueprint, MockDemoRollup}; -use citrea_sequencer::SequencerConfig; +use citrea_common::{from_toml_path, FullNodeConfig, ProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use clap::Parser; use sov_mock_da::MockDaConfig; use sov_modules_api::Spec; use sov_modules_rollup_blueprint::RollupBlueprint; use sov_state::storage::NativeStorage; -use sov_stf_runner::{from_toml_path, FullNodeConfig, ProverConfig}; use tracing::{error, instrument}; #[cfg(test)] diff --git a/bin/citrea/src/rollup/bitcoin.rs b/bin/citrea/src/rollup/bitcoin.rs index 6bc4fb380b..63555ba816 100644 --- a/bin/citrea/src/rollup/bitcoin.rs +++ b/bin/citrea/src/rollup/bitcoin.rs @@ -6,6 +6,7 @@ use bitcoin_da::service::{BitcoinService, BitcoinServiceConfig, TxidWrapper}; use bitcoin_da::spec::{BitcoinSpec, RollupParams}; use bitcoin_da::verifier::BitcoinVerifier; use citrea_common::rpc::register_healthcheck_rpc; +use citrea_common::{FullNodeConfig, ProverConfig}; use citrea_primitives::{REVEAL_BATCH_PROOF_PREFIX, REVEAL_LIGHT_CLIENT_PREFIX}; use citrea_prover::prover_service::ParallelProverService; use citrea_risc0_bonsai_adapter::host::Risc0BonsaiHost; @@ -23,7 +24,6 @@ use sov_rollup_interface::services::da::SenderWithNotifier; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; use sov_state::{DefaultStorageSpec, Storage, ZkStorage}; -use sov_stf_runner::{FullNodeConfig, ProverConfig}; use tokio::sync::broadcast; use tokio::sync::mpsc::unbounded_channel; use tracing::instrument; @@ -105,7 +105,7 @@ impl RollupBlueprint for BitcoinRollup { #[instrument(level = "trace", skip_all, err)] fn create_storage_manager( &self, - rollup_config: &sov_stf_runner::FullNodeConfig, + rollup_config: &citrea_common::FullNodeConfig, ) -> 
Result { let storage_config = StorageConfig { path: rollup_config.storage.path.clone(), diff --git a/bin/citrea/src/rollup/mock.rs b/bin/citrea/src/rollup/mock.rs index 2ba57f2bb0..756ed35de9 100644 --- a/bin/citrea/src/rollup/mock.rs +++ b/bin/citrea/src/rollup/mock.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use async_trait::async_trait; use citrea_common::rpc::register_healthcheck_rpc; +use citrea_common::{FullNodeConfig, ProverConfig}; use citrea_prover::prover_service::ParallelProverService; use citrea_risc0_bonsai_adapter::host::Risc0BonsaiHost; use citrea_risc0_bonsai_adapter::Digest; @@ -18,7 +19,6 @@ use sov_prover_storage_manager::ProverStorageManager; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; use sov_state::{DefaultStorageSpec, Storage, ZkStorage}; -use sov_stf_runner::{FullNodeConfig, ProverConfig}; use tokio::sync::broadcast; use crate::CitreaRollupBlueprint; diff --git a/bin/citrea/src/rollup/mod.rs b/bin/citrea/src/rollup/mod.rs index f6cf5b2ecd..79c842bf8d 100644 --- a/bin/citrea/src/rollup/mod.rs +++ b/bin/citrea/src/rollup/mod.rs @@ -2,10 +2,11 @@ use std::sync::Arc; use anyhow::anyhow; use async_trait::async_trait; +use citrea_common::{FullNodeConfig, ProverConfig, SequencerConfig}; use citrea_fullnode::{CitreaFullnode, FullNode}; use citrea_primitives::forks::FORKS; use citrea_prover::{CitreaProver, Prover}; -use citrea_sequencer::{CitreaSequencer, Sequencer, SequencerConfig}; +use citrea_sequencer::{CitreaSequencer, Sequencer}; use sov_db::ledger_db::SharedLedgerOps; use sov_db::rocks_db_config::RocksdbConfig; use sov_db::schema::types::BatchNumber; @@ -15,7 +16,7 @@ use sov_modules_rollup_blueprint::RollupBlueprint; use sov_modules_stf_blueprint::{Runtime as RuntimeTrait, StfBlueprint}; use sov_rollup_interface::fork::ForkManager; use sov_state::storage::NativeStorage; -use sov_stf_runner::{FullNodeConfig, InitVariant, ProverConfig}; +use sov_stf_runner::InitVariant; use tokio::sync::broadcast; use tracing::{info, instrument}; @@ -333,7 +334,7 @@ pub trait CitreaRollupBlueprint: RollupBlueprint { storage_manager, init_variant, Arc::new(prover_service), - Some(prover_config), + prover_config, code_commitments_by_spec, fork_manager, soft_confirmation_tx, diff --git a/bin/citrea/src/test_rpc.rs b/bin/citrea/src/test_rpc.rs index 1dcd1f5171..8e274001a7 100644 --- a/bin/citrea/src/test_rpc.rs +++ b/bin/citrea/src/test_rpc.rs @@ -1,3 +1,5 @@ +#[cfg(test)] +use citrea_common::RpcConfig; use hex::ToHex; use reqwest::header::CONTENT_TYPE; use sha2::Digest; @@ -7,8 +9,6 @@ use sov_mock_da::MockDaSpec; #[cfg(test)] use sov_modules_api::DaSpec; use sov_rollup_interface::stf::{Event, SoftConfirmationReceipt, TransactionReceipt}; -#[cfg(test)] -use sov_stf_runner::RpcConfig; struct TestExpect { payload: serde_json::Value, diff --git a/bin/citrea/tests/all_tests.rs b/bin/citrea/tests/all_tests.rs index 72112d8639..6a4aceaf15 100644 --- a/bin/citrea/tests/all_tests.rs +++ b/bin/citrea/tests/all_tests.rs @@ -3,7 +3,6 @@ mod e2e; mod bitcoin_e2e; - mod evm; mod mempool; mod soft_confirmation_rule_enforcer; diff --git a/bin/citrea/tests/bitcoin_e2e/bitcoin.rs b/bin/citrea/tests/bitcoin_e2e/bitcoin.rs deleted file mode 100644 index 6bab7dc8fa..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/bitcoin.rs +++ /dev/null @@ -1,366 +0,0 @@ -use std::collections::HashSet; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use anyhow::{bail, Context}; -use async_trait::async_trait; -use bitcoin::Address; -use 
bitcoin_da::service::{get_relevant_blobs_from_txs, FINALITY_DEPTH}; -use bitcoin_da::spec::blob::BlobWithSender; -use bitcoincore_rpc::json::AddressType::Bech32m; -use bitcoincore_rpc::{Auth, Client, RpcApi}; -use citrea_primitives::REVEAL_BATCH_PROOF_PREFIX; -use futures::TryStreamExt; -use tokio::process::Command; -use tokio::sync::OnceCell; -use tokio::time::sleep; - -use super::config::BitcoinConfig; -use super::docker::DockerEnv; -use super::framework::TestContext; -use super::node::{LogProvider, Node, Restart, SpawnOutput}; -use super::Result; -use crate::bitcoin_e2e::node::NodeKind; - -pub struct BitcoinNode { - spawn_output: SpawnOutput, - pub config: BitcoinConfig, - client: Client, - gen_addr: OnceCell
, - docker_env: Arc>, -} - -impl BitcoinNode { - pub async fn new(config: &BitcoinConfig, docker: Arc>) -> Result { - let spawn_output = Self::spawn(config, &docker).await?; - - let rpc_url = format!( - "http://127.0.0.1:{}/wallet/{}", - config.rpc_port, - NodeKind::Bitcoin - ); - let client = Client::new( - &rpc_url, - Auth::UserPass(config.rpc_user.clone(), config.rpc_password.clone()), - ) - .await - .context("Failed to create RPC client")?; - - wait_for_rpc_ready(&client, None).await?; - - Ok(Self { - spawn_output, - config: config.clone(), - client, - gen_addr: OnceCell::new(), - docker_env: docker, - }) - } - - pub async fn wait_mempool_len( - &self, - target_len: usize, - timeout: Option, - ) -> Result<()> { - let timeout = timeout.unwrap_or(Duration::from_secs(300)); - let start = Instant::now(); - while start.elapsed() < timeout { - let mempool_len = self.get_raw_mempool().await?.len(); - if mempool_len >= target_len { - return Ok(()); - } - sleep(Duration::from_millis(500)).await; - } - bail!("Timeout waiting for mempool to reach length {}", target_len) - } - - pub async fn fund_wallet(&self, name: String, blocks: u64) -> Result<()> { - let rpc_url = format!("http://127.0.0.1:{}/wallet/{}", self.config.rpc_port, name); - let client = Client::new( - &rpc_url, - Auth::UserPass( - self.config.rpc_user.clone(), - self.config.rpc_password.clone(), - ), - ) - .await - .context("Failed to create RPC client")?; - - let gen_addr = client - .get_new_address(None, Some(Bech32m)) - .await? - .assume_checked(); - client.generate_to_address(blocks, &gen_addr).await?; - Ok(()) - } - - pub async fn get_finalized_height(&self) -> Result { - Ok(self.get_block_count().await? - FINALITY_DEPTH + 1) - } - - pub async fn get_relevant_blobs_from_block(&self, height: u64) -> Result> { - let hash = self.get_block_hash(height).await?; - let block = self.get_block(&hash).await?; - - Ok(get_relevant_blobs_from_txs( - block.txdata, - REVEAL_BATCH_PROOF_PREFIX, - )) - } - - async fn wait_for_shutdown(&self) -> Result<()> { - let timeout_duration = Duration::from_secs(30); - let start = std::time::Instant::now(); - - while start.elapsed() < timeout_duration { - if !self.is_process_running().await? { - println!("Bitcoin daemon has stopped successfully"); - return Ok(()); - } - sleep(Duration::from_millis(200)).await; - } - - bail!("Timeout waiting for Bitcoin daemon to stop") - } - - async fn is_process_running(&self) -> Result { - let data_dir = &self.config.data_dir; - let output = Command::new("pgrep") - .args(["-f", &format!("bitcoind.*{}", data_dir.display())]) - .output() - .await?; - - Ok(output.status.success()) - } - - // Infallible, discard already loaded errors - async fn load_wallets(&self) { - let _ = self.load_wallet(&NodeKind::Bitcoin.to_string()).await; - let _ = self.load_wallet(&NodeKind::Sequencer.to_string()).await; - let _ = self.load_wallet(&NodeKind::Prover.to_string()).await; - } - - // Switch this over to Node signature once we add support for docker to citrea nodes - async fn spawn(config: &BitcoinConfig, docker: &Arc>) -> Result { - match docker.as_ref() { - Some(docker) => docker.spawn(config.into()).await, - None => ::spawn(config), - } - } -} - -#[async_trait] -impl RpcApi for BitcoinNode { - async fn call serde::de::Deserialize<'a>>( - &self, - cmd: &str, - args: &[serde_json::Value], - ) -> bitcoincore_rpc::Result { - self.client.call(cmd, args).await - } - - // Override deprecated generate method. 
- // Uses or lazy init gen_addr and forward to `generate_to_address` - async fn generate( - &self, - block_num: u64, - _maxtries: Option, - ) -> bitcoincore_rpc::Result> { - let addr = self - .gen_addr - .get_or_init(|| async { - self.client - .get_new_address(None, Some(Bech32m)) - .await - .expect("Failed to generate address") - .assume_checked() - }) - .await; - - self.generate_to_address(block_num, addr).await - } -} - -impl Node for BitcoinNode { - type Config = BitcoinConfig; - type Client = Client; - - fn spawn(config: &Self::Config) -> Result { - let args = config.args(); - println!("Running bitcoind with args : {args:?}"); - - Command::new("bitcoind") - .args(&args) - .kill_on_drop(true) - .envs(config.env.clone()) - .spawn() - .context("Failed to spawn bitcoind process") - .map(SpawnOutput::Child) - } - - fn spawn_output(&mut self) -> &mut SpawnOutput { - &mut self.spawn_output - } - - async fn wait_for_ready(&self, timeout: Option) -> Result<()> { - println!("Waiting for ready"); - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - while start.elapsed() < timeout { - if wait_for_rpc_ready(&self.client, Some(timeout)) - .await - .is_ok() - { - return Ok(()); - } - tokio::time::sleep(Duration::from_millis(500)).await; - } - anyhow::bail!("Node failed to become ready within the specified timeout") - } - - fn client(&self) -> &Self::Client { - &self.client - } - - fn env(&self) -> Vec<(&'static str, &'static str)> { - self.config.env.clone() - } - - fn config_mut(&mut self) -> &mut Self::Config { - &mut self.config - } -} - -impl Restart for BitcoinNode { - async fn wait_until_stopped(&mut self) -> Result<()> { - self.client.stop().await?; - self.stop().await?; - - match &self.spawn_output { - SpawnOutput::Child(_) => self.wait_for_shutdown().await, - SpawnOutput::Container(output) => { - let Some(env) = self.docker_env.as_ref() else { - bail!("Missing docker environment") - }; - env.docker.stop_container(&output.id, None).await?; - - env.docker - .wait_container::(&output.id, None) - .try_collect::>() - .await?; - env.docker.remove_container(&output.id, None).await?; - println!("Docker container {} succesfully removed", output.id); - Ok(()) - } - } - } - - async fn start(&mut self, config: Option) -> Result<()> { - if let Some(config) = config { - self.config = config - } - self.spawn_output = Self::spawn(&self.config, &self.docker_env).await?; - - self.wait_for_ready(None).await?; - - // Reload wallets after restart - self.load_wallets().await; - - Ok(()) - } -} - -impl LogProvider for BitcoinNode { - fn kind(&self) -> NodeKind { - NodeKind::Bitcoin - } - - fn log_path(&self) -> PathBuf { - self.config.data_dir.join("regtest").join("debug.log") - } -} - -pub struct BitcoinNodeCluster { - inner: Vec, -} - -impl BitcoinNodeCluster { - pub async fn new(ctx: &TestContext) -> Result { - let n_nodes = ctx.config.test_case.n_nodes; - let mut cluster = Self { - inner: Vec::with_capacity(n_nodes), - }; - for config in ctx.config.bitcoin.iter() { - let node = BitcoinNode::new(config, Arc::clone(&ctx.docker)).await?; - cluster.inner.push(node) - } - - Ok(cluster) - } - - pub async fn stop_all(&mut self) -> Result<()> { - for node in &mut self.inner { - RpcApi::stop(node).await?; - node.stop().await?; - } - Ok(()) - } - - pub async fn wait_for_sync(&self, timeout: Duration) -> Result<()> { - let start = Instant::now(); - while start.elapsed() < timeout { - let mut heights = HashSet::new(); - for node in &self.inner { - let height = 
node.get_block_count().await?; - heights.insert(height); - } - - if heights.len() == 1 { - return Ok(()); - } - - sleep(Duration::from_secs(1)).await; - } - bail!("Nodes failed to sync within the specified timeout") - } - - // Connect all bitcoin nodes between them - pub async fn connect_nodes(&self) -> Result<()> { - for (i, from_node) in self.inner.iter().enumerate() { - for (j, to_node) in self.inner.iter().enumerate() { - if i != j { - let ip = match &to_node.spawn_output { - SpawnOutput::Container(container) => container.ip.clone(), - _ => "127.0.0.1".to_string(), - }; - - let add_node_arg = format!("{}:{}", ip, to_node.config.p2p_port); - from_node.add_node(&add_node_arg).await?; - } - } - } - Ok(()) - } - - pub fn get(&self, index: usize) -> Option<&BitcoinNode> { - self.inner.get(index) - } - - #[allow(unused)] - pub fn get_mut(&mut self, index: usize) -> Option<&mut BitcoinNode> { - self.inner.get_mut(index) - } -} - -async fn wait_for_rpc_ready(client: &Client, timeout: Option) -> Result<()> { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(300)); - while start.elapsed() < timeout { - match client.get_blockchain_info().await { - Ok(_) => return Ok(()), - Err(_) => sleep(Duration::from_millis(500)).await, - } - } - Err(anyhow::anyhow!("Timeout waiting for RPC to be ready")) -} diff --git a/bin/citrea/tests/bitcoin_e2e/tests/bitcoin_test.rs b/bin/citrea/tests/bitcoin_e2e/bitcoin_test.rs similarity index 92% rename from bin/citrea/tests/bitcoin_e2e/tests/bitcoin_test.rs rename to bin/citrea/tests/bitcoin_e2e/bitcoin_test.rs index 4cd0580b72..166838eb15 100644 --- a/bin/citrea/tests/bitcoin_e2e/tests/bitcoin_test.rs +++ b/bin/citrea/tests/bitcoin_e2e/bitcoin_test.rs @@ -4,12 +4,11 @@ use anyhow::bail; use async_trait::async_trait; use bitcoincore_rpc::json::IndexStatus; use bitcoincore_rpc::RpcApi; - -use crate::bitcoin_e2e::config::{BitcoinConfig, TestCaseConfig}; -use crate::bitcoin_e2e::framework::TestFramework; -use crate::bitcoin_e2e::node::Restart; -use crate::bitcoin_e2e::test_case::{TestCase, TestCaseRunner}; -use crate::bitcoin_e2e::Result; +use citrea_e2e::config::{BitcoinConfig, TestCaseConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::traits::Restart; +use citrea_e2e::Result; struct BasicSyncTest; diff --git a/bin/citrea/tests/bitcoin_e2e/config/bitcoin.rs b/bin/citrea/tests/bitcoin_e2e/config/bitcoin.rs deleted file mode 100644 index 447bad3e6e..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/bitcoin.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::path::PathBuf; - -use bitcoin::Network; -use tempfile::TempDir; - -#[derive(Debug, Clone)] -pub struct BitcoinConfig { - pub p2p_port: u16, - pub rpc_port: u16, - pub rpc_user: String, - pub rpc_password: String, - pub data_dir: PathBuf, - pub extra_args: Vec<&'static str>, - pub network: Network, - pub docker_image: Option, - pub env: Vec<(&'static str, &'static str)>, - pub idx: usize, -} - -impl Default for BitcoinConfig { - fn default() -> Self { - Self { - p2p_port: 0, - rpc_port: 0, - rpc_user: "user".to_string(), - rpc_password: "password".to_string(), - data_dir: TempDir::new() - .expect("Failed to create temporary directory") - .into_path(), - extra_args: Vec::new(), - network: Network::Regtest, - docker_image: Some("bitcoin/bitcoin:latest".to_string()), - env: Vec::new(), - idx: 0, - } - } -} - -impl BitcoinConfig { - fn base_args(&self) -> Vec { - vec![ - "-regtest".to_string(), - format!("-datadir={}", 
self.data_dir.display()), - format!("-port={}", self.p2p_port), - format!("-rpcport={}", self.rpc_port), - format!("-rpcuser={}", self.rpc_user), - format!("-rpcpassword={}", self.rpc_password), - "-server".to_string(), - "-daemonwait".to_string(), - "-txindex".to_string(), - "-addresstype=bech32m".to_string(), - "-debug=net".to_string(), - "-debug=rpc".to_string(), - ] - } - - pub fn args(&self) -> Vec { - [ - self.base_args(), - self.extra_args.iter().map(|&s| s.to_string()).collect(), - ] - .concat() - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/config/docker.rs b/bin/citrea/tests/bitcoin_e2e/config/docker.rs deleted file mode 100644 index 4de4426601..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/docker.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::path::PathBuf; - -use super::{BitcoinConfig, FullSequencerConfig}; -use crate::bitcoin_e2e::utils::get_genesis_path; - -#[derive(Debug)] -pub struct VolumeConfig { - pub name: String, - pub target: String, -} - -#[derive(Debug)] -pub struct DockerConfig { - pub ports: Vec, - pub image: String, - pub cmd: Vec, - pub log_path: PathBuf, - pub volume: VolumeConfig, -} - -impl From<&BitcoinConfig> for DockerConfig { - fn from(v: &BitcoinConfig) -> Self { - let mut args = v.args(); - - // Docker specific args - args.extend([ - "-rpcallowip=0.0.0.0/0".to_string(), - "-rpcbind=0.0.0.0".to_string(), - "-daemonwait=0".to_string(), - ]); - - Self { - ports: vec![v.rpc_port, v.p2p_port], - image: v - .docker_image - .clone() - .unwrap_or_else(|| "bitcoin/bitcoin:latest".to_string()), - cmd: args, - log_path: v.data_dir.join("regtest").join("debug.log"), - volume: VolumeConfig { - name: format!("bitcoin-{}", v.idx), - target: "/home/bitcoin/.bitcoin".to_string(), - }, - } - } -} - -impl From<&FullSequencerConfig> for DockerConfig { - fn from(v: &FullSequencerConfig) -> Self { - let args = vec![ - "--da-layer".to_string(), - "bitcoin".to_string(), - "--rollup-config-path".to_string(), - "sequencer_rollup_config.toml".to_string(), - "--sequencer-config-path".to_string(), - "sequencer_config.toml".to_string(), - "--genesis-paths".to_string(), - get_genesis_path(v.dir.parent().expect("Couldn't get parent dir")) - .display() - .to_string(), - ]; - - Self { - ports: vec![v.rollup.rpc.bind_port], - image: v - .docker_image - .clone() - .unwrap_or_else(|| "citrea:latest".to_string()), // Default to local image - cmd: args, - log_path: v.dir.join("stdout"), - volume: VolumeConfig { - name: "sequencer".to_string(), - target: "/sequencer/data".to_string(), - }, - } - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/config/mod.rs b/bin/citrea/tests/bitcoin_e2e/config/mod.rs deleted file mode 100644 index 9bb8453712..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -mod bitcoin; -mod docker; -mod rollup; -mod test; -mod test_case; -mod utils; - -use std::path::PathBuf; - -pub use bitcoin::BitcoinConfig; -pub use citrea_sequencer::SequencerConfig; -pub use docker::DockerConfig; -pub use rollup::{default_rollup_config, RollupConfig}; -pub use sov_stf_runner::ProverConfig; -pub use test::TestConfig; -pub use test_case::{TestCaseConfig, TestCaseEnv}; -pub use utils::config_to_file; - -#[derive(Clone, Debug)] -pub struct FullL2NodeConfig { - pub node: T, - pub rollup: RollupConfig, - pub docker_image: Option, - pub dir: PathBuf, - pub env: Vec<(&'static str, &'static str)>, -} - -pub type FullSequencerConfig = FullL2NodeConfig; -pub type FullProverConfig = FullL2NodeConfig; -pub type FullFullNodeConfig = FullL2NodeConfig<()>; 
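Note (not part of the patch): the in-tree bitcoin_e2e config and framework modules deleted above are replaced by the external citrea-e2e crate added in bin/citrea/Cargo.toml (rev a96abcf). The sketch below shows roughly how a test case is declared against that crate, mirroring the imports used in the renamed bitcoin_test.rs and mempool_accept.rs hunks; the TestCase hook names, TestCaseConfig fields, and runner wiring are assumptions carried over from the deleted in-tree framework, not confirmed citrea-e2e API.

use async_trait::async_trait;
use citrea_e2e::config::TestCaseConfig;
use citrea_e2e::framework::TestFramework;
use citrea_e2e::test_case::{TestCase, TestCaseRunner};
use citrea_e2e::Result;

struct ExampleSyncTest;

#[async_trait]
impl TestCase for ExampleSyncTest {
    // Assumed hook, mirroring the deleted in-tree TestCaseConfig and its Default impl.
    fn test_config() -> TestCaseConfig {
        TestCaseConfig {
            with_sequencer: true,
            ..Default::default()
        }
    }

    // Assumed entry point: the runner hands the test a handle to the spawned nodes.
    async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> {
        let sequencer = f.sequencer.as_ref().expect("sequencer is running");
        // send_publish_batch_request returning a Result matches the hunk in mempool_accept.rs.
        sequencer.client.send_publish_batch_request().await?;
        sequencer.wait_for_l2_height(1, None).await?;
        Ok(())
    }
}

// Assumed wiring: TestCaseRunner drives setup and teardown around the test case.
#[tokio::test]
async fn example_sync_test() -> Result<()> {
    TestCaseRunner::new(ExampleSyncTest).run().await
}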
diff --git a/bin/citrea/tests/bitcoin_e2e/config/rollup.rs b/bin/citrea/tests/bitcoin_e2e/config/rollup.rs deleted file mode 100644 index 3d83d99fb0..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/rollup.rs +++ /dev/null @@ -1,68 +0,0 @@ -use bitcoin_da::service::BitcoinServiceConfig; -use sov_stf_runner::{FullNodeConfig, RollupPublicKeys, RpcConfig, StorageConfig}; -use tempfile::TempDir; - -use super::BitcoinConfig; -use crate::bitcoin_e2e::utils::get_tx_backup_dir; -pub type RollupConfig = FullNodeConfig; - -pub fn default_rollup_config() -> RollupConfig { - RollupConfig { - rpc: RpcConfig { - bind_host: "127.0.0.1".into(), - bind_port: 0, - max_connections: 100, - max_request_body_size: 10 * 1024 * 1024, - max_response_body_size: 10 * 1024 * 1024, - batch_requests_limit: 50, - enable_subscriptions: true, - max_subscriptions_per_connection: 100, - }, - storage: StorageConfig { - path: TempDir::new() - .expect("Failed to create temporary directory") - .into_path(), - db_max_open_files: None, - }, - runner: None, - da: BitcoinServiceConfig { - node_url: String::new(), - node_username: String::from("user"), - node_password: String::from("password"), - network: bitcoin::Network::Regtest, - da_private_key: None, - tx_backup_dir: get_tx_backup_dir(), - }, - public_keys: RollupPublicKeys { - sequencer_public_key: vec![ - 32, 64, 64, 227, 100, 193, 15, 43, 236, 156, 31, 229, 0, 161, 205, 76, 36, 124, - 137, 214, 80, 160, 30, 215, 232, 44, 171, 168, 103, 135, 124, 33, - ], - // private key [4, 95, 252, 129, 163, 193, 253, 179, 175, 19, 89, 219, 242, 209, 20, 176, 179, 239, 191, 127, 41, 204, 156, 93, 160, 18, 103, 170, 57, 210, 199, 141] - // Private Key (WIF): KwNDSCvKqZqFWLWN1cUzvMiJQ7ck6ZKqR6XBqVKyftPZtvmbE6YD - sequencer_da_pub_key: vec![ - 3, 136, 195, 18, 11, 187, 25, 37, 38, 109, 184, 237, 247, 208, 131, 219, 162, 70, - 35, 174, 234, 47, 239, 247, 60, 51, 174, 242, 247, 112, 186, 222, 30, - ], - // private key [117, 186, 249, 100, 208, 116, 89, 70, 0, 54, 110, 91, 17, 26, 29, 168, 248, 107, 46, 254, 45, 34, 218, 81, 200, 216, 33, 38, 160, 252, 172, 114] - // Private Key (WIF): L1AZdJXzDGGENBBPZGSL7dKJnwn5xSKqzszgK6CDwiBGThYQEVTo - prover_da_pub_key: vec![ - 2, 138, 232, 157, 214, 46, 7, 210, 235, 33, 105, 239, 71, 169, 105, 233, 239, 84, - 172, 112, 13, 54, 9, 206, 106, 138, 251, 218, 15, 28, 137, 112, 127, - ], - }, - } -} - -impl From for BitcoinServiceConfig { - fn from(v: BitcoinConfig) -> Self { - Self { - node_url: format!("127.0.0.1:{}", v.rpc_port), - node_username: v.rpc_user, - node_password: v.rpc_password, - network: v.network, - da_private_key: None, - tx_backup_dir: "".to_string(), - } - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/config/test.rs b/bin/citrea/tests/bitcoin_e2e/config/test.rs deleted file mode 100644 index 3f33d4ff92..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/test.rs +++ /dev/null @@ -1,12 +0,0 @@ -use super::bitcoin::BitcoinConfig; -use super::test_case::TestCaseConfig; -use super::{FullFullNodeConfig, FullProverConfig, FullSequencerConfig}; - -#[derive(Clone)] -pub struct TestConfig { - pub test_case: TestCaseConfig, - pub bitcoin: Vec, - pub sequencer: FullSequencerConfig, - pub prover: FullProverConfig, - pub full_node: FullFullNodeConfig, -} diff --git a/bin/citrea/tests/bitcoin_e2e/config/test_case.rs b/bin/citrea/tests/bitcoin_e2e/config/test_case.rs deleted file mode 100644 index caaeca5e6f..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/test_case.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::path::PathBuf; -use 
std::time::Duration; - -use tempfile::TempDir; - -#[derive(Clone, Default)] -pub struct TestCaseEnv { - pub test: Vec<(&'static str, &'static str)>, - pub full_node: Vec<(&'static str, &'static str)>, - pub sequencer: Vec<(&'static str, &'static str)>, - pub prover: Vec<(&'static str, &'static str)>, - pub bitcoin: Vec<(&'static str, &'static str)>, -} - -impl TestCaseEnv { - // Base env that should apply to every test cases - fn base_env() -> Vec<(&'static str, &'static str)> { - vec![("NO_COLOR", "1")] - } - - fn test_env(&self) -> Vec<(&'static str, &'static str)> { - [Self::base_env(), self.test.clone()].concat() - } - - pub fn sequencer(&self) -> Vec<(&'static str, &'static str)> { - [self.test_env(), self.sequencer.clone()].concat() - } - - pub fn prover(&self) -> Vec<(&'static str, &'static str)> { - [self.test_env(), self.prover.clone()].concat() - } - - pub fn full_node(&self) -> Vec<(&'static str, &'static str)> { - [self.test_env(), self.full_node.clone()].concat() - } - - pub fn bitcoin(&self) -> Vec<(&'static str, &'static str)> { - [self.test_env(), self.bitcoin.clone()].concat() - } -} - -#[derive(Clone)] -pub struct TestCaseConfig { - pub n_nodes: usize, - pub with_sequencer: bool, - pub with_full_node: bool, - pub with_prover: bool, - #[allow(unused)] - pub timeout: Duration, - pub dir: PathBuf, - pub docker: bool, - // Either a relative dir from workspace root, i.e. "./resources/genesis/devnet" - // Or an absolute path. - // Defaults to resources/genesis/bitcoin-regtest - pub genesis_dir: Option, -} - -impl Default for TestCaseConfig { - fn default() -> Self { - TestCaseConfig { - n_nodes: 1, - with_sequencer: true, - with_prover: false, - with_full_node: false, - timeout: Duration::from_secs(60), - dir: TempDir::new() - .expect("Failed to create temporary directory") - .into_path(), - docker: std::env::var("USE_DOCKER").map_or(false, |v| v.parse().unwrap_or(false)), - genesis_dir: None, - } - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/config/utils.rs b/bin/citrea/tests/bitcoin_e2e/config/utils.rs deleted file mode 100644 index 49beb15630..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/config/utils.rs +++ /dev/null @@ -1,14 +0,0 @@ -use std::path::Path; - -use serde::Serialize; - -pub fn config_to_file(config: &C, path: &P) -> std::io::Result<()> -where - C: Serialize, - P: AsRef, -{ - let toml = - toml::to_string(config).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; - std::fs::write(path, toml)?; - Ok(()) -} diff --git a/bin/citrea/tests/bitcoin_e2e/docker.rs b/bin/citrea/tests/bitcoin_e2e/docker.rs deleted file mode 100644 index 44194d1150..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/docker.rs +++ /dev/null @@ -1,282 +0,0 @@ -use std::collections::{HashMap, HashSet}; -use std::io::{stdout, Write}; -use std::path::PathBuf; - -use anyhow::{anyhow, Context, Result}; -use bollard::container::{Config, LogOutput, LogsOptions, NetworkingConfig}; -use bollard::image::CreateImageOptions; -use bollard::models::{EndpointSettings, Mount, PortBinding}; -use bollard::network::CreateNetworkOptions; -use bollard::secret::MountTypeEnum; -use bollard::service::HostConfig; -use bollard::volume::CreateVolumeOptions; -use bollard::Docker; -use futures::StreamExt; -use tokio::fs::File; -use tokio::io::AsyncWriteExt; -use tokio::task::JoinHandle; - -use super::config::DockerConfig; -use super::node::SpawnOutput; -use super::utils::generate_test_id; -use crate::bitcoin_e2e::node::ContainerSpawnOutput; - -pub struct DockerEnv { - pub docker: Docker, - pub network_id: 
String, - pub network_name: String, - id: String, - volumes: HashSet, -} - -impl DockerEnv { - pub async fn new(n_nodes: usize) -> Result { - let docker = - Docker::connect_with_local_defaults().context("Failed to connect to Docker")?; - let test_id = generate_test_id(); - let (network_id, network_name) = Self::create_network(&docker, &test_id).await?; - let volumes = Self::create_volumes(&docker, &test_id, n_nodes).await?; - - Ok(Self { - docker, - network_id, - network_name, - id: test_id, - volumes, - }) - } - - async fn create_volumes( - docker: &Docker, - test_case_id: &str, - n_nodes: usize, - ) -> Result> { - let volume_configs = vec![("bitcoin", n_nodes)]; - let mut volumes = HashSet::new(); - - for (name, n) in volume_configs { - for i in 0..n { - let volume_name = format!("{name}-{i}-{test_case_id}"); - docker - .create_volume(CreateVolumeOptions { - name: volume_name.clone(), - driver: "local".to_string(), - driver_opts: HashMap::new(), - labels: HashMap::new(), - }) - .await?; - - volumes.insert(volume_name); - } - } - - Ok(volumes) - } - - async fn create_network(docker: &Docker, test_case_id: &str) -> Result<(String, String)> { - let network_name = format!("test_network_{}", test_case_id); - let options = CreateNetworkOptions { - name: network_name.clone(), - check_duplicate: true, - driver: "bridge".to_string(), - ..Default::default() - }; - - let id = docker - .create_network(options) - .await? - .id - .context("Error getting network id")?; - Ok((id, network_name)) - } - - pub async fn spawn(&self, config: DockerConfig) -> Result { - println!("Spawning docker with config {config:#?}"); - let exposed_ports: HashMap> = config - .ports - .iter() - .map(|port| (format!("{}/tcp", port), HashMap::new())) - .collect(); - - let port_bindings: HashMap>> = config - .ports - .iter() - .map(|port| { - ( - format!("{}/tcp", port), - Some(vec![PortBinding { - host_ip: Some("0.0.0.0".to_string()), - host_port: Some(port.to_string()), - }]), - ) - }) - .collect(); - - let mut network_config = HashMap::new(); - network_config.insert(self.network_id.clone(), EndpointSettings::default()); - - let volume_name = format!("{}-{}", config.volume.name, self.id); - let mount = Mount { - target: Some(config.volume.target.clone()), - source: Some(volume_name), - typ: Some(MountTypeEnum::VOLUME), - ..Default::default() - }; - - let container_config = Config { - image: Some(config.image), - cmd: Some(config.cmd), - exposed_ports: Some(exposed_ports), - host_config: Some(HostConfig { - port_bindings: Some(port_bindings), - // binds: Some(vec![config.dir]), - mounts: Some(vec![mount]), - ..Default::default() - }), - networking_config: Some(NetworkingConfig { - endpoints_config: network_config, - }), - tty: Some(true), - ..Default::default() - }; - - let image = container_config - .image - .as_ref() - .context("Image not specified in config")?; - self.ensure_image_exists(image).await?; - - // println!("options :{options:?}"); - // println!("config :{container_config:?}"); - - let container = self - .docker - .create_container::(None, container_config) - .await - .map_err(|e| anyhow!("Failed to create Docker container {e}"))?; - - self.docker - .start_container::(&container.id, None) - .await - .context("Failed to start Docker container")?; - - let inspect_result = self.docker.inspect_container(&container.id, None).await?; - let ip_address = inspect_result - .network_settings - .and_then(|ns| ns.networks) - .and_then(|networks| { - networks - .values() - .next() - .and_then(|network| 
network.ip_address.clone()) - }) - .context("Failed to get container IP address")?; - - // Extract container logs to host - // This spawns a background task to continuously stream logs from the container. - // The task will run until the container is stopped or removed during cleanup. - Self::extract_container_logs(self.docker.clone(), container.id.clone(), config.log_path); - - Ok(SpawnOutput::Container(ContainerSpawnOutput { - id: container.id, - ip: ip_address, - })) - } - - async fn ensure_image_exists(&self, image: &str) -> Result<()> { - let images = self - .docker - .list_images::(None) - .await - .context("Failed to list Docker images")?; - if images - .iter() - .any(|img| img.repo_tags.contains(&image.to_string())) - { - return Ok(()); - } - - println!("Pulling image: {}", image); - let options = Some(CreateImageOptions { - from_image: image, - ..Default::default() - }); - - let mut stream = self.docker.create_image(options, None, None); - while let Some(result) = stream.next().await { - match result { - Ok(info) => { - if let (Some(status), Some(progress)) = (info.status, info.progress) { - print!("\r{}: {} ", status, progress); - stdout().flush().unwrap(); - } - } - Err(e) => return Err(anyhow::anyhow!("Failed to pull image: {}", e)), - } - } - println!("Image succesfully pulled"); - - Ok(()) - } - - pub async fn cleanup(&self) -> Result<()> { - let containers = self.docker.list_containers::(None).await?; - for container in containers { - if let (Some(id), Some(networks)) = ( - container.id, - container.network_settings.and_then(|ns| ns.networks), - ) { - if networks.contains_key(&self.network_name) { - self.docker.stop_container(&id, None).await?; - self.docker.remove_container(&id, None).await?; - } - } - } - - self.docker.remove_network(&self.network_name).await?; - - for volume_name in &self.volumes { - self.docker.remove_volume(volume_name, None).await?; - } - - Ok(()) - } - - fn extract_container_logs( - docker: Docker, - container_id: String, - log_path: PathBuf, - ) -> JoinHandle> { - tokio::spawn(async move { - if let Some(parent) = log_path.parent() { - tokio::fs::create_dir_all(parent) - .await - .context("Failed to create log directory")?; - } - let mut log_file = File::create(log_path) - .await - .context("Failed to create log file")?; - let mut log_stream = docker.logs::( - &container_id, - Some(LogsOptions { - follow: true, - stdout: true, - stderr: true, - ..Default::default() - }), - ); - - while let Some(Ok(log_output)) = log_stream.next().await { - let log_line = match log_output { - LogOutput::Console { message } | LogOutput::StdOut { message } => message, - _ => continue, - }; - log_file - .write_all(&log_line) - .await - .context("Failed to write log line")?; - } - Ok(()) - }) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/framework.rs b/bin/citrea/tests/bitcoin_e2e/framework.rs deleted file mode 100644 index eae3ce926c..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/framework.rs +++ /dev/null @@ -1,200 +0,0 @@ -use std::future::Future; -use std::sync::Arc; - -use bitcoincore_rpc::RpcApi; - -use super::bitcoin::BitcoinNodeCluster; -use super::config::TestConfig; -use super::docker::DockerEnv; -use super::full_node::FullNode; -use super::node::{LogProvider, LogProviderErased, Node, NodeKind}; -use super::sequencer::Sequencer; -use super::Result; -use crate::bitcoin_e2e::prover::Prover; -use crate::bitcoin_e2e::utils::tail_file; - -pub struct TestContext { - pub config: TestConfig, - pub docker: Arc>, -} - -impl TestContext { - async fn new(config: 
TestConfig) -> Self { - let docker = if config.test_case.docker { - Some(DockerEnv::new(config.test_case.n_nodes).await.unwrap()) - } else { - None - }; - Self { - config, - docker: Arc::new(docker), - } - } -} - -pub struct TestFramework { - ctx: TestContext, - pub bitcoin_nodes: BitcoinNodeCluster, - pub sequencer: Option, - pub prover: Option, - pub full_node: Option, - show_logs: bool, - pub initial_da_height: u64, -} - -async fn create_optional(pred: bool, f: impl Future>) -> Result> { - if pred { - Ok(Some(f.await?)) - } else { - Ok(None) - } -} - -impl TestFramework { - pub async fn new(config: TestConfig) -> Result { - anyhow::ensure!( - config.test_case.n_nodes > 0, - "At least one bitcoin node has to be running" - ); - - let ctx = TestContext::new(config).await; - - let bitcoin_nodes = BitcoinNodeCluster::new(&ctx).await?; - - // tokio::time::sleep(std::time::Duration::from_secs(30)).await; - Ok(Self { - bitcoin_nodes, - sequencer: None, - prover: None, - full_node: None, - ctx, - show_logs: true, - initial_da_height: 0, - }) - } - - pub async fn init_nodes(&mut self) -> Result<()> { - // Has to initialize sequencer first since prover and full node depend on it - self.sequencer = create_optional( - self.ctx.config.test_case.with_sequencer, - Sequencer::new(&self.ctx), - ) - .await?; - - (self.prover, self.full_node) = tokio::try_join!( - create_optional( - self.ctx.config.test_case.with_prover, - Prover::new(&self.ctx) - ), - create_optional( - self.ctx.config.test_case.with_full_node, - FullNode::new(&self.ctx) - ), - )?; - - Ok(()) - } - - fn get_nodes_as_log_provider(&self) -> Vec<&dyn LogProviderErased> { - vec![ - self.bitcoin_nodes.get(0).map(LogProvider::as_erased), - self.sequencer.as_ref().map(LogProvider::as_erased), - self.full_node.as_ref().map(LogProvider::as_erased), - self.prover.as_ref().map(LogProvider::as_erased), - ] - .into_iter() - .flatten() - .collect() - } - - pub fn show_log_paths(&self) { - if self.show_logs { - println!( - "Logs available at {}", - self.ctx.config.test_case.dir.display() - ); - - for node in self.get_nodes_as_log_provider() { - println!( - "{} logs available at : {}", - node.kind(), - node.log_path().display() - ); - } - } - } - - pub fn dump_log(&self) -> Result<()> { - println!("Dumping logs:"); - - let n_lines = std::env::var("TAIL_N_LINES") - .ok() - .and_then(|v| v.parse::().ok()) - .unwrap_or(25); - for node in self.get_nodes_as_log_provider() { - println!("{} logs (last {n_lines} lines):", node.kind()); - if let Err(e) = tail_file(&node.log_path(), n_lines) { - eprint!("{e}"); - } - } - Ok(()) - } - - pub async fn stop(&mut self) -> Result<()> { - println!("Stopping framework..."); - - if let Some(sequencer) = &mut self.sequencer { - let _ = sequencer.stop().await; - println!("Successfully stopped sequencer"); - } - - if let Some(prover) = &mut self.prover { - let _ = prover.stop().await; - println!("Successfully stopped prover"); - } - - if let Some(full_node) = &mut self.full_node { - let _ = full_node.stop().await; - println!("Successfully stopped full_node"); - } - - let _ = self.bitcoin_nodes.stop_all().await; - println!("Successfully stopped bitcoin nodes"); - - if let Some(docker) = self.ctx.docker.as_ref() { - let _ = docker.cleanup().await; - println!("Successfully cleaned docker"); - } - - Ok(()) - } - - pub async fn fund_da_wallets(&mut self) -> Result<()> { - let da = self.bitcoin_nodes.get(0).unwrap(); - - da.create_wallet(&NodeKind::Sequencer.to_string(), None, None, None, None) - .await?; - 
da.create_wallet(&NodeKind::Prover.to_string(), None, None, None, None) - .await?; - da.create_wallet(&NodeKind::Bitcoin.to_string(), None, None, None, None) - .await?; - - let blocks_to_mature = 100; - let blocks_to_fund = 25; - if self.ctx.config.test_case.with_sequencer { - da.fund_wallet(NodeKind::Sequencer.to_string(), blocks_to_fund) - .await?; - } - - if self.ctx.config.test_case.with_prover { - da.fund_wallet(NodeKind::Prover.to_string(), blocks_to_fund) - .await?; - } - da.fund_wallet(NodeKind::Bitcoin.to_string(), blocks_to_fund) - .await?; - - da.generate(blocks_to_mature, None).await?; - self.initial_da_height = da.get_block_count().await?; - Ok(()) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/full_node.rs b/bin/citrea/tests/bitcoin_e2e/full_node.rs deleted file mode 100644 index d087203758..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/full_node.rs +++ /dev/null @@ -1,181 +0,0 @@ -use std::fs::File; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::process::Stdio; - -use anyhow::{bail, Context}; -use sov_rollup_interface::rpc::{SequencerCommitmentResponse, VerifiedProofResponse}; -use tokio::process::Command; -use tokio::time::{sleep, Duration, Instant}; - -use super::config::{config_to_file, FullFullNodeConfig, TestConfig}; -use super::framework::TestContext; -use super::node::{LogProvider, Node, NodeKind, SpawnOutput}; -use super::utils::{get_citrea_path, get_stderr_path, get_stdout_path, retry}; -use super::Result; -use crate::bitcoin_e2e::utils::get_genesis_path; -use crate::evm::make_test_client; -use crate::test_client::TestClient; - -#[allow(unused)] -pub struct FullNode { - spawn_output: SpawnOutput, - config: FullFullNodeConfig, - pub client: Box, -} - -impl FullNode { - pub async fn new(ctx: &TestContext) -> Result { - let TestConfig { - full_node: full_node_config, - .. 
- } = &ctx.config; - - let spawn_output = Self::spawn(full_node_config)?; - - let socket_addr = SocketAddr::new( - full_node_config - .rollup - .rpc - .bind_host - .parse() - .context("Failed to parse bind host")?, - full_node_config.rollup.rpc.bind_port, - ); - let client = retry(|| async { make_test_client(socket_addr).await }, None).await?; - - Ok(Self { - spawn_output, - config: full_node_config.clone(), - client, - }) - } - - pub async fn wait_for_sequencer_commitments( - &self, - height: u64, - timeout: Option, - ) -> Result> { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - - loop { - if start.elapsed() >= timeout { - bail!("FullNode failed to get sequencer commitments within the specified timeout"); - } - - match self - .client - .ledger_get_sequencer_commitments_on_slot_by_number(height) - .await - { - Ok(Some(commitments)) => return Ok(commitments), - Ok(None) => sleep(Duration::from_millis(500)).await, - Err(e) => bail!("Error fetching sequencer commitments: {}", e), - } - } - } - - pub async fn wait_for_zkproofs( - &self, - height: u64, - timeout: Option, - ) -> Result> { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - - loop { - if start.elapsed() >= timeout { - bail!("FullNode failed to get zkproofs within the specified timeout"); - } - - match self - .client - .ledger_get_verified_proofs_by_slot_height(height) - .await - { - Some(proofs) => return Ok(proofs), - None => sleep(Duration::from_millis(500)).await, - } - } - } -} - -impl Node for FullNode { - type Config = FullFullNodeConfig; - type Client = TestClient; - - fn spawn(config: &Self::Config) -> Result { - let citrea = get_citrea_path(); - let dir = &config.dir; - - let stdout_file = - File::create(get_stdout_path(dir)).context("Failed to create stdout file")?; - let stderr_file = - File::create(get_stderr_path(dir)).context("Failed to create stderr file")?; - - let rollup_config_path = dir.join("full_node_rollup_config.toml"); - config_to_file(&config.rollup, &rollup_config_path)?; - - Command::new(citrea) - .arg("--da-layer") - .arg("bitcoin") - .arg("--rollup-config-path") - .arg(rollup_config_path) - .arg("--genesis-paths") - .arg(get_genesis_path( - dir.parent().expect("Couldn't get parent dir"), - )) - .envs(config.env.clone()) - .stdout(Stdio::from(stdout_file)) - .stderr(Stdio::from(stderr_file)) - .kill_on_drop(true) - .spawn() - .context("Failed to spawn citrea process") - .map(SpawnOutput::Child) - } - - fn spawn_output(&mut self) -> &mut SpawnOutput { - &mut self.spawn_output - } - - async fn wait_for_ready(&self, timeout: Option) -> Result<()> { - let start = Instant::now(); - - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - while start.elapsed() < timeout { - if self - .client - .ledger_get_head_soft_confirmation() - .await - .is_ok() - { - return Ok(()); - } - sleep(Duration::from_millis(500)).await; - } - bail!("FullNode failed to become ready within the specified timeout") - } - - fn client(&self) -> &Self::Client { - &self.client - } - - fn env(&self) -> Vec<(&'static str, &'static str)> { - self.config.env.clone() - } - - fn config_mut(&mut self) -> &mut Self::Config { - &mut self.config - } -} - -impl LogProvider for FullNode { - fn kind(&self) -> NodeKind { - NodeKind::FullNode - } - - fn log_path(&self) -> PathBuf { - get_stdout_path(&self.config.dir) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/tests/mempool_accept.rs b/bin/citrea/tests/bitcoin_e2e/mempool_accept.rs similarity index 83% 
rename from bin/citrea/tests/bitcoin_e2e/tests/mempool_accept.rs rename to bin/citrea/tests/bitcoin_e2e/mempool_accept.rs index 1e715b8142..ab2d329905 100644 --- a/bin/citrea/tests/bitcoin_e2e/tests/mempool_accept.rs +++ b/bin/citrea/tests/bitcoin_e2e/mempool_accept.rs @@ -2,11 +2,11 @@ use async_trait::async_trait; use bitcoin_da::service::FINALITY_DEPTH; use bitcoincore_rpc::RpcApi; -use crate::bitcoin_e2e::config::BitcoinConfig; -use crate::bitcoin_e2e::framework::TestFramework; -use crate::bitcoin_e2e::node::L2Node; -use crate::bitcoin_e2e::test_case::{TestCase, TestCaseRunner}; -use crate::bitcoin_e2e::Result; +use citrea_e2e::config::BitcoinConfig; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::traits::L2Node; +use citrea_e2e::Result; struct MempoolAcceptTest; @@ -31,7 +31,7 @@ impl TestCase for MempoolAcceptTest { // publish min_soft_conf_per_commitment - 1 confirmations, no commitments should be sent for _ in 0..min_soft_conf_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } sequencer .wait_for_l2_height(min_soft_conf_per_commitment, None) diff --git a/bin/citrea/tests/bitcoin_e2e/mod.rs b/bin/citrea/tests/bitcoin_e2e/mod.rs index 9cf7f4d7b1..e951a6b23f 100644 --- a/bin/citrea/tests/bitcoin_e2e/mod.rs +++ b/bin/citrea/tests/bitcoin_e2e/mod.rs @@ -1,15 +1,5 @@ -mod bitcoin; -pub mod config; -mod docker; -pub mod framework; -mod full_node; -pub mod node; -mod prover; -mod sequencer; -pub mod test_case; - -mod tests; - -mod utils; - -pub(crate) type Result = anyhow::Result; +pub mod bitcoin_test; +// pub mod mempool_accept; +pub mod prover_test; +pub mod sequencer_commitments; +pub mod sequencer_test; diff --git a/bin/citrea/tests/bitcoin_e2e/node.rs b/bin/citrea/tests/bitcoin_e2e/node.rs deleted file mode 100644 index d81ddb144b..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/node.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::fmt; -use std::path::PathBuf; -use std::time::Duration; - -use anyhow::Context; -use bollard::container::StopContainerOptions; -use bollard::Docker; -use tokio::process::Child; - -use super::Result; -use crate::test_client::TestClient; -use crate::test_helpers::wait_for_l2_block; - -#[derive(Debug)] -pub enum NodeKind { - Bitcoin, - Prover, - Sequencer, - FullNode, -} - -impl fmt::Display for NodeKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - NodeKind::Bitcoin => write!(f, "bitcoin"), - NodeKind::Prover => write!(f, "prover"), - NodeKind::Sequencer => write!(f, "sequencer"), - NodeKind::FullNode => write!(f, "full-node"), - } - } -} - -#[derive(Debug)] -pub struct ContainerSpawnOutput { - pub id: String, - pub ip: String, -} - -#[derive(Debug)] -pub enum SpawnOutput { - Child(Child), - Container(ContainerSpawnOutput), -} - -/// The Node trait defines the common interface shared between -/// BitcoinNode, Prover, Sequencer and FullNode -pub(crate) trait Node { - type Config; - type Client; - - /// Spawn a new node with specific config and return its child - fn spawn(test_config: &Self::Config) -> Result; - fn spawn_output(&mut self) -> &mut SpawnOutput; - - fn config_mut(&mut self) -> &mut Self::Config; - - /// Stops the running node - async fn stop(&mut self) -> Result<()> { - match self.spawn_output() { - SpawnOutput::Child(process) => { - process - .kill() - .await - .context("Failed to kill child process")?; - Ok(()) - } - SpawnOutput::Container(ContainerSpawnOutput { id, .. 
}) => { - println!("Stopping container {id}"); - let docker = - Docker::connect_with_local_defaults().context("Failed to connect to Docker")?; - docker - .stop_container(id, Some(StopContainerOptions { t: 10 })) - .await - .context("Failed to stop Docker container")?; - Ok(()) - } - } - } - - /// Wait for the node to be reachable by its client. - async fn wait_for_ready(&self, timeout: Option) -> Result<()>; - - fn client(&self) -> &Self::Client; - - #[allow(unused)] - fn env(&self) -> Vec<(&'static str, &'static str)> { - Vec::new() - } -} - -pub trait L2Node: Node { - async fn wait_for_l2_height(&self, height: u64, timeout: Option); -} - -impl L2Node for T -where - T: Node, -{ - async fn wait_for_l2_height(&self, height: u64, timeout: Option) { - wait_for_l2_block(self.client(), height, timeout).await - } -} - -// Two patterns supported : -// - Call wait_until_stopped, runs any extra commands needed for testing purposes, call start again. -// - Call restart if you need to wait for node to be fully shutdown and brough back up with new config. -pub trait Restart: Node { - async fn wait_until_stopped(&mut self) -> Result<()>; - async fn start(&mut self, new_config: Option) -> Result<()>; - - // Default implementation to support waiting for node to be fully shutdown and brough back up with new config. - async fn restart(&mut self, new_config: Option) -> Result<()> { - self.wait_until_stopped().await?; - self.start(new_config).await - } -} - -impl Restart for T -where - T: L2Node, -{ - async fn wait_until_stopped(&mut self) -> Result<()> { - self.stop().await?; - match self.spawn_output() { - SpawnOutput::Child(pid) => pid.wait().await?, - SpawnOutput::Container(_) => unimplemented!("L2 nodes don't run in docker yet"), - }; - Ok(()) - } - - async fn start(&mut self, new_config: Option) -> Result<()> { - let config = self.config_mut(); - if let Some(new_config) = new_config { - *config = new_config - } - *self.spawn_output() = Self::spawn(config)?; - self.wait_for_ready(None).await - } -} - -pub trait LogProvider: Node { - fn kind(&self) -> NodeKind; - fn log_path(&self) -> PathBuf; - fn as_erased(&self) -> &dyn LogProviderErased - where - Self: Sized, - { - self - } -} - -pub trait LogProviderErased { - fn kind(&self) -> NodeKind; - fn log_path(&self) -> PathBuf; -} - -impl LogProviderErased for T { - fn kind(&self) -> NodeKind { - LogProvider::kind(self) - } - - fn log_path(&self) -> PathBuf { - LogProvider::log_path(self) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/prover.rs b/bin/citrea/tests/bitcoin_e2e/prover.rs deleted file mode 100644 index 54a5f3a056..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/prover.rs +++ /dev/null @@ -1,140 +0,0 @@ -use std::fs::File; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::process::Stdio; - -use anyhow::Context; -use tokio::process::Command; -use tokio::time::{sleep, Duration, Instant}; - -use super::config::{config_to_file, FullProverConfig, TestConfig}; -use super::framework::TestContext; -use super::node::{LogProvider, Node, NodeKind, SpawnOutput}; -use super::utils::{get_citrea_path, get_stderr_path, get_stdout_path, retry}; -use super::Result; -use crate::bitcoin_e2e::utils::get_genesis_path; -use crate::evm::make_test_client; -use crate::test_client::TestClient; -use crate::test_helpers::wait_for_prover_l1_height; - -#[allow(unused)] -pub struct Prover { - spawn_output: SpawnOutput, - config: FullProverConfig, - pub client: Box, -} - -impl Prover { - pub async fn new(ctx: &TestContext) -> Result { - let TestConfig { - prover: 
prover_config, - .. - } = &ctx.config; - - let spawn_output = Self::spawn(prover_config)?; - - let socket_addr = SocketAddr::new( - prover_config - .rollup - .rpc - .bind_host - .parse() - .context("Failed to parse bind host")?, - prover_config.rollup.rpc.bind_port, - ); - let client = retry(|| async { make_test_client(socket_addr).await }, None).await?; - - Ok(Self { - spawn_output, - config: prover_config.to_owned(), - client, - }) - } - - pub async fn wait_for_l1_height(&self, height: u64, timeout: Option) -> Result<()> { - wait_for_prover_l1_height(&self.client, height, timeout).await - } -} - -impl Node for Prover { - type Config = FullProverConfig; - type Client = TestClient; - - fn spawn(config: &Self::Config) -> Result { - let citrea = get_citrea_path(); - let dir = &config.dir; - - let stdout_file = - File::create(get_stdout_path(dir)).context("Failed to create stdout file")?; - let stderr_file = - File::create(get_stderr_path(dir)).context("Failed to create stderr file")?; - - let config_path = dir.join("prover_config.toml"); - config_to_file(&config.node, &config_path)?; - - let rollup_config_path = dir.join("prover_rollup_config.toml"); - config_to_file(&config.rollup, &rollup_config_path)?; - - Command::new(citrea) - .arg("--da-layer") - .arg("bitcoin") - .arg("--rollup-config-path") - .arg(rollup_config_path) - .arg("--prover-config-path") - .arg(config_path) - .arg("--genesis-paths") - .arg(get_genesis_path( - dir.parent().expect("Couldn't get parent dir"), - )) - .envs(config.env.clone()) - .stdout(Stdio::from(stdout_file)) - .stderr(Stdio::from(stderr_file)) - .kill_on_drop(true) - .spawn() - .context("Failed to spawn citrea process") - .map(SpawnOutput::Child) - } - - fn spawn_output(&mut self) -> &mut SpawnOutput { - &mut self.spawn_output - } - - async fn wait_for_ready(&self, timeout: Option) -> Result<()> { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - while start.elapsed() < timeout { - if self - .client - .ledger_get_head_soft_confirmation() - .await - .is_ok() - { - return Ok(()); - } - sleep(Duration::from_millis(500)).await; - } - anyhow::bail!("Prover failed to become ready within the specified timeout") - } - - fn client(&self) -> &Self::Client { - &self.client - } - - fn env(&self) -> Vec<(&'static str, &'static str)> { - self.config.env.clone() - } - - fn config_mut(&mut self) -> &mut Self::Config { - &mut self.config - } -} - -impl LogProvider for Prover { - fn kind(&self) -> NodeKind { - NodeKind::Prover - } - - fn log_path(&self) -> PathBuf { - get_stdout_path(&self.config.dir) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/tests/prover_test.rs b/bin/citrea/tests/bitcoin_e2e/prover_test.rs similarity index 89% rename from bin/citrea/tests/bitcoin_e2e/tests/prover_test.rs rename to bin/citrea/tests/bitcoin_e2e/prover_test.rs index 7cbf6ac605..0e0a22e887 100644 --- a/bin/citrea/tests/bitcoin_e2e/tests/prover_test.rs +++ b/bin/citrea/tests/bitcoin_e2e/prover_test.rs @@ -6,18 +6,16 @@ use async_trait::async_trait; use bitcoin_da::service::{BitcoinService, BitcoinServiceConfig, TxidWrapper, FINALITY_DEPTH}; use bitcoin_da::spec::RollupParams; use bitcoincore_rpc::RpcApi; +use citrea_e2e::config::{SequencerConfig, TestCaseConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::node::NodeKind; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::Result; use citrea_primitives::{REVEAL_BATCH_PROOF_PREFIX, REVEAL_LIGHT_CLIENT_PREFIX}; use sov_rollup_interface::da::{DaData, 
SequencerCommitment}; use sov_rollup_interface::services::da::SenderWithNotifier; use tokio::sync::mpsc::UnboundedSender; -use crate::bitcoin_e2e::config::{SequencerConfig, TestCaseConfig}; -use crate::bitcoin_e2e::framework::TestFramework; -use crate::bitcoin_e2e::node::NodeKind; -use crate::bitcoin_e2e::test_case::{TestCase, TestCaseRunner}; -use crate::bitcoin_e2e::utils::get_tx_backup_dir; -use crate::bitcoin_e2e::Result; - /// This is a basic prover test showcasing spawning a bitcoin node as DA, a sequencer and a prover. /// It generates soft confirmations and wait until it reaches the first commitment. /// It asserts that the blob inscribe txs have been sent. @@ -28,7 +26,7 @@ struct BasicProverTest; impl TestCase for BasicProverTest { fn test_config() -> TestCaseConfig { TestCaseConfig { - with_prover: true, + with_batch_prover: true, with_full_node: true, ..Default::default() } @@ -46,8 +44,8 @@ impl TestCase for BasicProverTest { bail!("Sequencer not running. Set TestCaseConfig with_sequencer to true") }; - let Some(prover) = &f.prover else { - bail!("Prover not running. Set TestCaseConfig with_prover to true") + let Some(batch_prover) = &f.batch_prover else { + bail!("Batch Prover not running. Set TestCaseConfig with_prover to true") }; let Some(full_node) = &f.full_node else { @@ -61,14 +59,11 @@ impl TestCase for BasicProverTest { // Generate confirmed UTXOs da.generate(120, None).await?; - let seq_height0 = sequencer.client.eth_block_number().await; - assert_eq!(seq_height0, 0); - let min_soft_confirmations_per_commitment = sequencer.min_soft_confirmations_per_commitment(); for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } da.generate(FINALITY_DEPTH, None).await?; @@ -78,7 +73,9 @@ impl TestCase for BasicProverTest { da.generate(FINALITY_DEPTH, None).await?; let finalized_height = da.get_finalized_height().await?; - prover.wait_for_l1_height(finalized_height, None).await?; + batch_prover + .wait_for_l1_height(finalized_height, None) + .await?; da.generate(FINALITY_DEPTH, None).await?; let proofs = full_node @@ -119,7 +116,7 @@ struct SkipPreprovenCommitmentsTest { impl TestCase for SkipPreprovenCommitmentsTest { fn test_config() -> TestCaseConfig { TestCaseConfig { - with_prover: true, + with_batch_prover: true, with_full_node: true, ..Default::default() } @@ -130,8 +127,8 @@ impl TestCase for SkipPreprovenCommitmentsTest { bail!("Sequencer not running. Set TestCaseConfig with_sequencer to true") }; - let Some(prover) = &f.prover else { - bail!("Prover not running. Set TestCaseConfig with_prover to true") + let Some(prover) = &f.batch_prover else { + bail!("Batch Prover not running. Set TestCaseConfig with_prover to true") }; let Some(full_node) = &f.full_node else { @@ -163,7 +160,11 @@ impl TestCase for SkipPreprovenCommitmentsTest { // somehow resubmitted the same commitment. "045FFC81A3C1FDB3AF1359DBF2D114B0B3EFBF7F29CC9C5DA01267AA39D2C78D".to_owned(), ), - tx_backup_dir: get_tx_backup_dir(), + tx_backup_dir: Self::test_config() + .dir + .join("tx_backup_dir") + .display() + .to_string(), }; let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); // Keep sender for cleanup @@ -186,14 +187,11 @@ impl TestCase for SkipPreprovenCommitmentsTest { // Generate 1 FINALIZED DA block. 
da.generate(1 + FINALITY_DEPTH, None).await?; - let seq_height0 = sequencer.client.eth_block_number().await; - assert_eq!(seq_height0, 0); - let min_soft_confirmations_per_commitment = sequencer.min_soft_confirmations_per_commitment(); for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } da.generate(FINALITY_DEPTH, None).await?; @@ -263,7 +261,7 @@ impl TestCase for SkipPreprovenCommitmentsTest { // Trigger a new commitment. for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } // Wait for the sequencer commitment to be submitted & accepted. diff --git a/bin/citrea/tests/bitcoin_e2e/sequencer.rs b/bin/citrea/tests/bitcoin_e2e/sequencer.rs deleted file mode 100644 index 37d5fc1f2d..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/sequencer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::fs::File; -use std::net::SocketAddr; -use std::path::PathBuf; -use std::process::Stdio; - -use anyhow::Context; -use tokio::process::Command; -use tokio::time::{sleep, Duration, Instant}; - -use super::config::{config_to_file, FullSequencerConfig, TestConfig}; -use super::framework::TestContext; -use super::node::{LogProvider, Node, NodeKind, SpawnOutput}; -use super::utils::{get_citrea_path, get_stderr_path, get_stdout_path, retry}; -use super::Result; -use crate::bitcoin_e2e::utils::get_genesis_path; -use crate::evm::make_test_client; -use crate::test_client::TestClient; - -#[allow(unused)] -pub struct Sequencer { - spawn_output: SpawnOutput, - config: FullSequencerConfig, - pub client: Box, -} - -impl Sequencer { - pub async fn new(ctx: &TestContext) -> Result { - let TestConfig { - sequencer: config, .. 
- } = &ctx.config; - - let spawn_output = Self::spawn(config)?; - - let socket_addr = SocketAddr::new( - config - .rollup - .rpc - .bind_host - .parse() - .context("Failed to parse bind host")?, - config.rollup.rpc.bind_port, - ); - - let client = retry(|| async { make_test_client(socket_addr).await }, None).await?; - - Ok(Self { - spawn_output, - config: config.clone(), - client, - }) - } - - pub fn dir(&self) -> &PathBuf { - &self.config.dir - } - - pub fn min_soft_confirmations_per_commitment(&self) -> u64 { - self.config.node.min_soft_confirmations_per_commitment - } -} - -impl Node for Sequencer { - type Config = FullSequencerConfig; - type Client = TestClient; - - fn spawn(config: &Self::Config) -> Result { - let citrea = get_citrea_path(); - let dir = &config.dir; - - let stdout_file = - File::create(get_stdout_path(dir)).context("Failed to create stdout file")?; - let stderr_file = - File::create(get_stderr_path(dir)).context("Failed to create stderr file")?; - - let config_path = dir.join("sequencer_config.toml"); - config_to_file(&config.node, &config_path)?; - - let rollup_config_path = dir.join("sequencer_rollup_config.toml"); - config_to_file(&config.rollup, &rollup_config_path)?; - - Command::new(citrea) - .arg("--da-layer") - .arg("bitcoin") - .arg("--rollup-config-path") - .arg(rollup_config_path) - .arg("--sequencer-config-path") - .arg(config_path) - .arg("--genesis-paths") - .arg(get_genesis_path( - dir.parent().expect("Couldn't get parent dir"), - )) - .envs(config.env.clone()) - .stdout(Stdio::from(stdout_file)) - .stderr(Stdio::from(stderr_file)) - .kill_on_drop(true) - .spawn() - .context("Failed to spawn citrea process") - .map(SpawnOutput::Child) - } - - fn spawn_output(&mut self) -> &mut SpawnOutput { - &mut self.spawn_output - } - - async fn wait_for_ready(&self, timeout: Option) -> Result<()> { - let start = Instant::now(); - let timeout = timeout.unwrap_or(Duration::from_secs(30)); - while start.elapsed() < timeout { - if self - .client - .ledger_get_head_soft_confirmation() - .await - .is_ok() - { - return Ok(()); - } - sleep(Duration::from_millis(500)).await; - } - anyhow::bail!("Sequencer failed to become ready within the specified timeout") - } - - fn client(&self) -> &Self::Client { - &self.client - } - - fn env(&self) -> Vec<(&'static str, &'static str)> { - self.config.env.clone() - } - - fn config_mut(&mut self) -> &mut Self::Config { - &mut self.config - } -} - -impl LogProvider for Sequencer { - fn kind(&self) -> NodeKind { - NodeKind::Sequencer - } - - fn log_path(&self) -> PathBuf { - get_stdout_path(self.dir()) - } -} diff --git a/bin/citrea/tests/bitcoin_e2e/tests/sequencer_commitments.rs b/bin/citrea/tests/bitcoin_e2e/sequencer_commitments.rs similarity index 84% rename from bin/citrea/tests/bitcoin_e2e/tests/sequencer_commitments.rs rename to bin/citrea/tests/bitcoin_e2e/sequencer_commitments.rs index dcbb2506ee..b72c3071ce 100644 --- a/bin/citrea/tests/bitcoin_e2e/tests/sequencer_commitments.rs +++ b/bin/citrea/tests/bitcoin_e2e/sequencer_commitments.rs @@ -1,29 +1,25 @@ use async_trait::async_trait; use bitcoin::hashes::Hash; -use bitcoin_da::service::FINALITY_DEPTH; -use bitcoin_da::spec::BitcoinSpec; +use bitcoin_da::service::{get_relevant_blobs_from_txs, FINALITY_DEPTH}; use bitcoincore_rpc::RpcApi; use borsh::BorshDeserialize; +use citrea_e2e::bitcoin::BitcoinNode; +use citrea_e2e::config::{SequencerConfig, TestCaseConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::sequencer::Sequencer; +use 
citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::Result; +use citrea_primitives::REVEAL_BATCH_PROOF_PREFIX; use rs_merkle::algorithms::Sha256; use rs_merkle::MerkleTree; -use sov_modules_api::BlobReaderTrait; -use sov_rollup_interface::da::DaData; - -use crate::bitcoin_e2e::bitcoin::BitcoinNode; -use crate::bitcoin_e2e::config::{SequencerConfig, TestCaseConfig}; -use crate::bitcoin_e2e::framework::TestFramework; -use crate::bitcoin_e2e::node::L2Node; -use crate::bitcoin_e2e::sequencer::Sequencer; -use crate::bitcoin_e2e::test_case::{TestCase, TestCaseRunner}; -use crate::bitcoin_e2e::Result; - +use sov_rollup_interface::da::{BlobReaderTrait, DaData}; struct LedgerGetCommitmentsProverTest; #[async_trait] impl TestCase for LedgerGetCommitmentsProverTest { fn test_config() -> TestCaseConfig { TestCaseConfig { - with_prover: true, + with_batch_prover: true, ..Default::default() } } @@ -35,17 +31,17 @@ impl TestCase for LedgerGetCommitmentsProverTest { async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> { let sequencer = f.sequencer.as_ref().unwrap(); let da = f.bitcoin_nodes.get(0).expect("DA not running."); - let prover = f.prover.as_ref().unwrap(); + let prover = f.batch_prover.as_ref().unwrap(); let min_soft_confirmations_per_commitment = sequencer.min_soft_confirmations_per_commitment(); for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } sequencer .wait_for_l2_height(min_soft_confirmations_per_commitment, None) - .await; + .await?; // Wait for blob tx to hit the mempool da.wait_mempool_len(1, None).await?; @@ -115,13 +111,13 @@ impl TestCase for LedgerGetCommitmentsTest { sequencer.min_soft_confirmations_per_commitment(); for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } // disable this since it's the only difference from other tests?? // da.generate(1, None).await?; - // sequencer.client.send_publish_batch_request().await; + // sequencer.client.send_publish_batch_request().await?; // Wait for blob tx to hit the mempool da.wait_mempool_len(1, None).await?; @@ -131,7 +127,7 @@ impl TestCase for LedgerGetCommitmentsTest { full_node .wait_for_l2_height(min_soft_confirmations_per_commitment, None) - .await; + .await?; let finalized_height = da.get_finalized_height().await?; @@ -185,11 +181,11 @@ impl TestCase for SequencerSendCommitmentsToDaTest { // publish min_soft_confirmations_per_commitment - 1 confirmations, no commitments should be sent for _ in 0..min_soft_confirmations_per_commitment - 1 { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } sequencer .wait_for_l2_height(min_soft_confirmations_per_commitment - 1, None) - .await; + .await?; da.generate(FINALITY_DEPTH, None).await?; tokio::time::sleep(std::time::Duration::from_millis(1000)).await; @@ -197,24 +193,27 @@ impl TestCase for SequencerSendCommitmentsToDaTest { let finalized_height = da.get_finalized_height().await?; for height in initial_height..finalized_height { - let mut blobs = da.get_relevant_blobs_from_block(height).await?; + let hash = da.get_block_hash(height).await?; + let block = da.get_block(&hash).await?; + + let mut blobs = get_relevant_blobs_from_txs(block.txdata, REVEAL_BATCH_PROOF_PREFIX); for mut blob in blobs.drain(0..) 
{ - let data = blob.full_data(); + let data = BlobReaderTrait::full_data(&mut blob); assert_eq!(data, &[] as &[u8]); } } // Publish one more L2 block and send commitment - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; sequencer .wait_for_l2_height( min_soft_confirmations_per_commitment + FINALITY_DEPTH - 1, None, ) - .await; + .await?; // Wait for blob tx to hit the mempool da.wait_mempool_len(1, None).await?; @@ -230,14 +229,14 @@ impl TestCase for SequencerSendCommitmentsToDaTest { .await?; for _ in 0..min_soft_confirmations_per_commitment { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } sequencer .wait_for_l2_height( end_l2_block + min_soft_confirmations_per_commitment + FINALITY_DEPTH - 2, None, ) - .await; + .await?; // Wait for blob tx to hit the mempool da.wait_mempool_len(1, None).await?; @@ -265,13 +264,16 @@ impl SequencerSendCommitmentsToDaTest { let finalized_height = da.get_finalized_height().await?; // Extract and verify the commitment from the block - let mut blobs = da.get_relevant_blobs_from_block(finalized_height).await?; + let hash = da.get_block_hash(finalized_height).await?; + let block = da.get_block(&hash).await?; + + let mut blobs = get_relevant_blobs_from_txs(block.txdata, REVEAL_BATCH_PROOF_PREFIX); assert_eq!(blobs.len(), 1); let mut blob = blobs.pop().unwrap(); - let data = blob.full_data(); + let data = BlobReaderTrait::full_data(&mut blob); let commitment = DaData::try_from_slice(data).unwrap(); @@ -287,8 +289,8 @@ impl SequencerSendCommitmentsToDaTest { soft_confirmations.push( sequencer .client - .ledger_get_soft_confirmation_by_number::(i) - .await + .ledger_get_soft_confirmation_by_number(i) + .await? .unwrap(), ); } diff --git a/bin/citrea/tests/bitcoin_e2e/tests/sequencer_test.rs b/bin/citrea/tests/bitcoin_e2e/sequencer_test.rs similarity index 76% rename from bin/citrea/tests/bitcoin_e2e/tests/sequencer_test.rs rename to bin/citrea/tests/bitcoin_e2e/sequencer_test.rs index 10edbba4bd..fa66f1df0f 100644 --- a/bin/citrea/tests/bitcoin_e2e/tests/sequencer_test.rs +++ b/bin/citrea/tests/bitcoin_e2e/sequencer_test.rs @@ -1,13 +1,11 @@ use anyhow::bail; use async_trait::async_trait; -use bitcoin_da::spec::BitcoinSpec; use bitcoincore_rpc::RpcApi; -use citrea_sequencer::SequencerConfig; - -use crate::bitcoin_e2e::framework::TestFramework; -use crate::bitcoin_e2e::node::{L2Node, Restart}; -use crate::bitcoin_e2e::test_case::{TestCase, TestCaseRunner}; -use crate::bitcoin_e2e::Result; +use citrea_e2e::config::SequencerConfig; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use citrea_e2e::traits::Restart; +use citrea_e2e::Result; struct BasicSequencerTest; @@ -22,15 +20,26 @@ impl TestCase for BasicSequencerTest { bail!("bitcoind not running. Test cannot run with bitcoind runnign as DA") }; - let seq_height0 = sequencer.client.eth_block_number().await; - assert_eq!(seq_height0, 0); + sequencer.client.send_publish_batch_request().await?; + + let head_batch0 = sequencer + .client + .ledger_get_head_soft_confirmation() + .await? 
+ .unwrap(); + assert_eq!(head_batch0.l2_height, 1); + + sequencer.client.send_publish_batch_request().await?; - sequencer.client.send_publish_batch_request().await; da.generate(1, None).await?; - sequencer.wait_for_l2_height(1, None).await; - let seq_height1 = sequencer.client.eth_block_number().await; - assert_eq!(seq_height1, 1); + sequencer.client.wait_for_l2_block(1, None).await?; + let head_batch1 = sequencer + .client + .ledger_get_head_soft_confirmation() + .await? + .unwrap(); + assert_eq!(head_batch1.l2_height, 2); Ok(()) } @@ -68,7 +77,7 @@ impl TestCase for SequencerMissedDaBlocksTest { // Create initial DA blocks da.generate(3, None).await?; - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; sequencer.wait_until_stopped().await?; @@ -79,15 +88,13 @@ impl TestCase for SequencerMissedDaBlocksTest { sequencer.start(None).await?; for _ in 0..10 { - sequencer.client.send_publish_batch_request().await; + sequencer.client.send_publish_batch_request().await?; } let head_soft_confirmation_height = sequencer .client .ledger_get_head_soft_confirmation_height() - .await - .unwrap() - .unwrap(); + .await?; let mut last_used_l1_height = initial_l1_height; @@ -98,8 +105,8 @@ impl TestCase for SequencerMissedDaBlocksTest { for i in 1..=head_soft_confirmation_height { let soft_confirmation = sequencer .client - .ledger_get_soft_confirmation_by_number::(i) - .await + .ledger_get_soft_confirmation_by_number(i) + .await? .unwrap(); if i == 1 { diff --git a/bin/citrea/tests/bitcoin_e2e/test_case.rs b/bin/citrea/tests/bitcoin_e2e/test_case.rs deleted file mode 100644 index ff0e577971..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/test_case.rs +++ /dev/null @@ -1,344 +0,0 @@ -//! This module provides the TestCaseRunner and TestCase trait for running and defining test cases. -//! It handles setup, execution, and cleanup of test environments. - -use std::panic::{self}; -use std::path::{Path, PathBuf}; -use std::time::Duration; - -use anyhow::{bail, Context}; -use async_trait::async_trait; -use bitcoin_da::service::BitcoinServiceConfig; -use citrea_sequencer::SequencerConfig; -use futures::FutureExt; -use sov_stf_runner::{ProverConfig, RpcConfig, RunnerConfig, StorageConfig}; - -use super::config::{ - default_rollup_config, BitcoinConfig, FullFullNodeConfig, FullProverConfig, - FullSequencerConfig, RollupConfig, TestCaseConfig, TestCaseEnv, TestConfig, -}; -use super::framework::TestFramework; -use super::node::NodeKind; -use super::utils::{copy_directory, get_available_port, get_tx_backup_dir}; -use super::Result; -use crate::bitcoin_e2e::node::Node; -use crate::bitcoin_e2e::utils::{get_default_genesis_path, get_workspace_root}; - -// TestCaseRunner manages the lifecycle of a test case, including setup, execution, and cleanup. -/// It creates a test framework with the associated configs, spawns required nodes, connects them, -/// runs the test case, and performs cleanup afterwards. The `run` method handles any panics that -/// might occur during test execution and takes care of cleaning up and stopping the child processes. -pub struct TestCaseRunner(T); - -impl TestCaseRunner { - /// Creates a new TestCaseRunner with the given test case. - pub fn new(test_case: T) -> Self { - Self(test_case) - } - - /// Internal method to fund the wallets, connect the nodes, wait for them to be ready. 
- async fn prepare(&self, f: &mut TestFramework) -> Result<()> { - f.fund_da_wallets().await?; - f.init_nodes().await?; - f.show_log_paths(); - f.bitcoin_nodes.connect_nodes().await?; - - if let Some(sequencer) = &f.sequencer { - sequencer - .wait_for_ready(Some(Duration::from_secs(5))) - .await?; - } - - Ok(()) - } - - async fn run_test_case(&mut self, f: &mut TestFramework) -> Result<()> { - self.prepare(f).await?; - self.0.setup(f).await?; - self.0.run_test(f).await - } - - /// Executes the test case, handling any panics and performing cleanup. - /// - /// This sets up the framework, executes the test, and ensures cleanup is performed even if a panic occurs. - pub async fn run(mut self) -> Result<()> { - let mut framework = None; - let result = panic::AssertUnwindSafe(async { - framework = Some(TestFramework::new(Self::generate_test_config()?).await?); - let f = framework.as_mut().unwrap(); - self.run_test_case(f).await - }) - .catch_unwind() - .await; - - let f = framework - .as_mut() - .expect("Framework not correctly initialized"); - - if result.is_err() { - if let Err(e) = f.dump_log() { - eprintln!("Error dumping log: {}", e); - } - } - - f.stop().await?; - - // Additional test cleanup - self.0.cleanup().await?; - - match result { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(e), - Err(panic_error) => { - let panic_msg = panic_error - .downcast_ref::() - .map(|s| s.to_string()) - .unwrap_or_else(|| "Unknown panic".to_string()); - bail!(panic_msg) - } - } - } - - fn generate_test_config() -> Result { - let test_case = T::test_config(); - let env = T::test_env(); - let bitcoin = T::bitcoin_config(); - let prover = T::prover_config(); - let sequencer = T::sequencer_config(); - let sequencer_rollup = default_rollup_config(); - let prover_rollup = default_rollup_config(); - let full_node_rollup = default_rollup_config(); - - let [bitcoin_dir, dbs_dir, prover_dir, sequencer_dir, full_node_dir, genesis_dir] = - create_dirs(&test_case.dir)?; - - copy_genesis_dir(&test_case.genesis_dir, &genesis_dir)?; - - let mut bitcoin_confs = vec![]; - for i in 0..test_case.n_nodes { - let data_dir = bitcoin_dir.join(i.to_string()); - std::fs::create_dir_all(&data_dir) - .with_context(|| format!("Failed to create {} directory", data_dir.display()))?; - - let p2p_port = get_available_port()?; - let rpc_port = get_available_port()?; - - bitcoin_confs.push(BitcoinConfig { - p2p_port, - rpc_port, - data_dir, - env: env.bitcoin().clone(), - idx: i, - ..bitcoin.clone() - }) - } - - // Target first bitcoin node as DA for now - let da_config: BitcoinServiceConfig = bitcoin_confs[0].clone().into(); - - let sequencer_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::Sequencer.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - da_private_key: Some( - "045FFC81A3C1FDB3AF1359DBF2D114B0B3EFBF7F29CC9C5DA01267AA39D2C78D" - .to_string(), - ), - node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), - tx_backup_dir: get_tx_backup_dir(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{}-db", node_kind)), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..sequencer_rollup.rpc - }, - ..sequencer_rollup - } - }; - - let runner_config = Some(RunnerConfig { - sequencer_client_url: format!( - "http://{}:{}", - sequencer_rollup.rpc.bind_host, sequencer_rollup.rpc.bind_port, - ), - include_tx_body: true, - accept_public_input_as_proven: Some(true), - pruning_config: None, - sync_blocks_count: 10, - }); - - let 
prover_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::Prover.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - da_private_key: Some( - "75BAF964D074594600366E5B111A1DA8F86B2EFE2D22DA51C8D82126A0FCAC72" - .to_string(), - ), - node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), - tx_backup_dir: get_tx_backup_dir(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{}-db", node_kind)), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..prover_rollup.rpc - }, - runner: runner_config.clone(), - ..prover_rollup - } - }; - - let full_node_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::FullNode.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - node_url: format!( - "http://{}/wallet/{}", - da_config.node_url, - NodeKind::Bitcoin // Use default wallet - ), - tx_backup_dir: get_tx_backup_dir(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{}-db", node_kind)), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..full_node_rollup.rpc - }, - runner: runner_config.clone(), - ..full_node_rollup - } - }; - - Ok(TestConfig { - bitcoin: bitcoin_confs, - sequencer: FullSequencerConfig { - rollup: sequencer_rollup, - dir: sequencer_dir, - docker_image: None, - node: sequencer, - env: env.sequencer(), - }, - prover: FullProverConfig { - rollup: prover_rollup, - dir: prover_dir, - docker_image: None, - node: prover, - env: env.prover(), - }, - full_node: FullFullNodeConfig { - rollup: full_node_rollup, - dir: full_node_dir, - docker_image: None, - node: (), - env: env.full_node(), - }, - test_case, - }) - } -} - -/// Defines the interface for implementing test cases. -/// -/// This trait should be implemented by every test case to define the configuration -/// and inner test logic. It provides default configurations that should be sane for most test cases, -/// which can be overridden by implementing the associated methods. -#[async_trait] -pub trait TestCase: Send + Sync + 'static { - /// Returns the test case configuration. - /// Override this method to provide custom test configurations. - fn test_config() -> TestCaseConfig { - TestCaseConfig::default() - } - - /// Returns the test case env. - /// Override this method to provide custom env per node. - fn test_env() -> TestCaseEnv { - TestCaseEnv::default() - } - - /// Returns the Bitcoin configuration for the test. - /// Override this method to provide a custom Bitcoin configuration. - fn bitcoin_config() -> BitcoinConfig { - BitcoinConfig::default() - } - - /// Returns the sequencer configuration for the test. - /// Override this method to provide a custom sequencer configuration. - fn sequencer_config() -> SequencerConfig { - SequencerConfig::default() - } - - /// Returns the prover configuration for the test. - /// Override this method to provide a custom prover configuration. - fn prover_config() -> ProverConfig { - ProverConfig::default() - } - - /// Returns the test setup - /// Override this method to add custom initialization logic - async fn setup(&self, _framework: &mut TestFramework) -> Result<()> { - Ok(()) - } - - /// Implements the actual test logic. - /// - /// This method is where the test case should be implemented. It receives - /// a reference to the TestFramework, which provides access to the test environment. 
- /// - /// # Arguments - /// * `framework` - A reference to the TestFramework instance - async fn run_test(&mut self, framework: &mut TestFramework) -> Result<()>; - - async fn cleanup(&self) -> Result<()> { - Ok(()) - } -} - -fn create_dirs(base_dir: &Path) -> Result<[PathBuf; 6]> { - let paths = [ - NodeKind::Bitcoin.to_string(), - "dbs".to_string(), - NodeKind::Prover.to_string(), - NodeKind::Sequencer.to_string(), - NodeKind::FullNode.to_string(), - "genesis".to_string(), - ] - .map(|dir| base_dir.join(dir)); - - for path in &paths { - std::fs::create_dir_all(path) - .with_context(|| format!("Failed to create {} directory", path.display()))?; - } - - Ok(paths) -} - -fn copy_genesis_dir(genesis_dir: &Option, target_dir: &Path) -> std::io::Result<()> { - let genesis_dir = - genesis_dir - .as_ref() - .map(PathBuf::from) - .map_or_else(get_default_genesis_path, |dir| { - if dir.is_absolute() { - dir - } else { - get_workspace_root().join(dir) - } - }); - - copy_directory(genesis_dir, target_dir) -} diff --git a/bin/citrea/tests/bitcoin_e2e/tests/mod.rs b/bin/citrea/tests/bitcoin_e2e/tests/mod.rs deleted file mode 100644 index e951a6b23f..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/tests/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod bitcoin_test; -// pub mod mempool_accept; -pub mod prover_test; -pub mod sequencer_commitments; -pub mod sequencer_test; diff --git a/bin/citrea/tests/bitcoin_e2e/utils.rs b/bin/citrea/tests/bitcoin_e2e/utils.rs deleted file mode 100644 index 05e395f9c1..0000000000 --- a/bin/citrea/tests/bitcoin_e2e/utils.rs +++ /dev/null @@ -1,149 +0,0 @@ -use std::fs::File; -use std::future::Future; -use std::io::{BufRead, BufReader}; -use std::net::TcpListener; -use std::path::{Path, PathBuf}; -use std::{fs, io}; - -use anyhow::bail; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use tokio::time::{sleep, Duration, Instant}; - -use super::Result; - -pub fn get_available_port() -> Result { - let listener = TcpListener::bind("127.0.0.1:0")?; - Ok(listener.local_addr()?.port()) -} - -pub fn get_workspace_root() -> PathBuf { - let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - manifest_dir - .ancestors() - .nth(2) - .expect("Failed to find workspace root") - .to_path_buf() -} - -/// Get citrea path from CITREA env or resolves to debug build. -pub fn get_citrea_path() -> PathBuf { - std::env::var("CITREA").map_or_else( - |_| { - let workspace_root = get_workspace_root(); - let mut path = workspace_root.to_path_buf(); - path.push("target"); - path.push("debug"); - path.push("citrea"); - path - }, - PathBuf::from, - ) -} - -pub fn get_stdout_path(dir: &Path) -> PathBuf { - dir.join("stdout.log") -} - -pub fn get_stderr_path(dir: &Path) -> PathBuf { - dir.join("stderr.log") -} - -/// Get genesis path from resources -/// TODO: assess need for customable genesis path in e2e tests -pub fn get_default_genesis_path() -> PathBuf { - let workspace_root = get_workspace_root(); - let mut path = workspace_root.to_path_buf(); - path.push("resources"); - path.push("genesis"); - path.push("bitcoin-regtest"); - path -} - -pub fn get_genesis_path(dir: &Path) -> PathBuf { - dir.join("genesis") -} - -pub fn generate_test_id() -> String { - thread_rng() - .sample_iter(&Alphanumeric) - .take(10) - .map(char::from) - .collect() -} - -pub fn copy_directory(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - let src = src.as_ref(); - let dst = dst.as_ref(); - - if !dst.exists() { - fs::create_dir_all(dst)?; - } - - for entry in fs::read_dir(src)? 
{ - let entry = entry?; - let ty = entry.file_type()?; - let file_name = entry.file_name(); - let src_path = src.join(&file_name); - let dst_path = dst.join(&file_name); - - if ty.is_dir() { - copy_directory(&src_path, &dst_path)?; - } else { - fs::copy(&src_path, &dst_path)?; - } - } - - Ok(()) -} - -pub(crate) async fn retry(f: F, timeout: Option) -> Result -where - F: Fn() -> Fut, - Fut: Future>, -{ - let start = Instant::now(); - let timeout = start + timeout.unwrap_or_else(|| Duration::from_secs(5)); - - loop { - match tokio::time::timeout_at(timeout, f()).await { - Ok(Ok(result)) => return Ok(result), - Ok(Err(e)) => { - if Instant::now() >= timeout { - return Err(e); - } - sleep(Duration::from_millis(500)).await; - } - Err(elapsed) => bail!("Timeout expired {elapsed}"), - } - } -} - -pub fn tail_file(path: &Path, lines: usize) -> Result<()> { - let file = File::open(path)?; - let reader = BufReader::new(file); - let mut last_lines = Vec::with_capacity(lines); - - for line in reader.lines() { - let line = line?; - if last_lines.len() >= lines { - last_lines.remove(0); - } - last_lines.push(line); - } - - for line in last_lines { - println!("{}", line); - } - - Ok(()) -} - -pub fn get_tx_backup_dir() -> String { - let workspace_root = get_workspace_root(); - let mut path = workspace_root.to_path_buf(); - path.push("resources"); - path.push("bitcoin"); - path.push("inscription_txs"); - path.to_str().expect("Failed to convert path").to_string() -} diff --git a/bin/citrea/tests/e2e/mod.rs b/bin/citrea/tests/e2e/mod.rs index fab958702c..82a971a877 100644 --- a/bin/citrea/tests/e2e/mod.rs +++ b/bin/citrea/tests/e2e/mod.rs @@ -12,14 +12,13 @@ use std::path::{Path, PathBuf}; use std::str::FromStr; use std::time::Duration; +use citrea_common::{ProverConfig, SequencerConfig}; use citrea_evm::smart_contracts::SimpleStorageContract; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag, U256}; use sov_mock_da::{MockAddress, MockDaService}; use sov_rollup_interface::rpc::{LastVerifiedProofResponse, SoftConfirmationStatus}; use sov_rollup_interface::services::da::DaService; -use sov_stf_runner::ProverConfig; use tokio::task::JoinHandle; use crate::evm::{init_test_rollup, make_test_client}; diff --git a/bin/citrea/tests/e2e/proving.rs b/bin/citrea/tests/e2e/proving.rs index d9440ba882..8d743c449a 100644 --- a/bin/citrea/tests/e2e/proving.rs +++ b/bin/citrea/tests/e2e/proving.rs @@ -1,12 +1,11 @@ /// Prover node, proving and full node proof verification related tests use std::time::Duration; -use citrea_sequencer::SequencerConfig; +use citrea_common::{ProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use sov_mock_da::{MockAddress, MockDaService}; use sov_rollup_interface::rpc::SoftConfirmationStatus; use sov_rollup_interface::services::da::DaService; -use sov_stf_runner::ProverConfig; use crate::evm::make_test_client; use crate::test_helpers::{ diff --git a/bin/citrea/tests/e2e/reopen.rs b/bin/citrea/tests/e2e/reopen.rs index a6c7497618..8dfcc1e1bf 100644 --- a/bin/citrea/tests/e2e/reopen.rs +++ b/bin/citrea/tests/e2e/reopen.rs @@ -4,11 +4,10 @@ use std::str::FromStr; use std::time::Duration; -use citrea_sequencer::SequencerConfig; +use citrea_common::{ProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag}; use sov_mock_da::{MockAddress, MockDaService}; -use sov_stf_runner::ProverConfig; use tokio::runtime::Runtime; use 
tokio::time::sleep; diff --git a/bin/citrea/tests/e2e/sequencer_behaviour.rs b/bin/citrea/tests/e2e/sequencer_behaviour.rs index aa511a3ffd..9ca63939cd 100644 --- a/bin/citrea/tests/e2e/sequencer_behaviour.rs +++ b/bin/citrea/tests/e2e/sequencer_behaviour.rs @@ -6,7 +6,7 @@ use alloy::consensus::{Signed, TxEip1559, TxEnvelope}; use alloy::signers::local::PrivateKeySigner; use alloy::signers::Signer; use alloy_rlp::{BytesMut, Encodable}; -use citrea_sequencer::{SequencerConfig, SequencerMempoolConfig}; +use citrea_common::{SequencerConfig, SequencerMempoolConfig}; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag}; use sov_mock_da::{MockAddress, MockDaService, MockDaSpec}; diff --git a/bin/citrea/tests/e2e/sequencer_replacement.rs b/bin/citrea/tests/e2e/sequencer_replacement.rs index c12410ccad..48fc4587d4 100644 --- a/bin/citrea/tests/e2e/sequencer_replacement.rs +++ b/bin/citrea/tests/e2e/sequencer_replacement.rs @@ -6,7 +6,7 @@ use std::time::Duration; use alloy::consensus::{Signed, TxEip1559, TxEnvelope}; use alloy_rlp::Decodable; -use citrea_sequencer::{SequencerConfig, SequencerMempoolConfig}; +use citrea_common::{SequencerConfig, SequencerMempoolConfig}; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag}; use sov_db::ledger_db::{LedgerDB, SequencerLedgerOps}; diff --git a/bin/citrea/tests/e2e/syncing.rs b/bin/citrea/tests/e2e/syncing.rs index ab9c084db5..0a36535d77 100644 --- a/bin/citrea/tests/e2e/syncing.rs +++ b/bin/citrea/tests/e2e/syncing.rs @@ -2,14 +2,13 @@ use std::str::FromStr; use std::time::Duration; -use citrea_sequencer::SequencerConfig; +use citrea_common::{ProverConfig, SequencerConfig}; use citrea_stf::genesis_config::GenesisPaths; use ethereum_rpc::LayerStatus; use reth_primitives::{Address, BlockNumberOrTag}; use sov_mock_da::{MockAddress, MockDaService, MockDaSpec, MockHash}; use sov_rollup_interface::da::{DaDataLightClient, DaSpec}; use sov_rollup_interface::services::da::DaService; -use sov_stf_runner::ProverConfig; use tokio::time::sleep; use crate::e2e::{execute_blocks, initialize_test, TestConfig}; diff --git a/bin/citrea/tests/e2e/tx_propagation.rs b/bin/citrea/tests/e2e/tx_propagation.rs index 21b22e7ab6..f6557e9d0d 100644 --- a/bin/citrea/tests/e2e/tx_propagation.rs +++ b/bin/citrea/tests/e2e/tx_propagation.rs @@ -1,7 +1,7 @@ /// Tests that check the full node's ability to send a transaction to the sequencer. 
use std::str::FromStr; -use citrea_sequencer::SequencerConfig; +use citrea_common::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag, TxHash}; diff --git a/bin/citrea/tests/evm/archival_state.rs b/bin/citrea/tests/evm/archival_state.rs index fbc8e07ea0..be76071e6f 100644 --- a/bin/citrea/tests/evm/archival_state.rs +++ b/bin/citrea/tests/evm/archival_state.rs @@ -1,8 +1,8 @@ use std::str::FromStr; use std::time::Duration; +use citrea_common::SequencerConfig; use citrea_evm::smart_contracts::SimpleStorageContract; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, U256}; use tokio::time::sleep; diff --git a/bin/citrea/tests/evm/fee.rs b/bin/citrea/tests/evm/fee.rs index 5ffbec8d42..94e7fdcba0 100644 --- a/bin/citrea/tests/evm/fee.rs +++ b/bin/citrea/tests/evm/fee.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use citrea_sequencer::SequencerConfig; +use citrea_common::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::BlockNumberOrTag; diff --git a/bin/citrea/tests/evm/gas_price.rs b/bin/citrea/tests/evm/gas_price.rs index d3b227dd9e..83ecffb5bc 100644 --- a/bin/citrea/tests/evm/gas_price.rs +++ b/bin/citrea/tests/evm/gas_price.rs @@ -3,8 +3,8 @@ use std::time::Duration; use alloy::signers::local::PrivateKeySigner; use alloy::signers::Signer; +use citrea_common::SequencerConfig; use citrea_evm::smart_contracts::SimpleStorageContract; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{BlockNumberOrTag, U256}; diff --git a/bin/citrea/tests/evm/mod.rs b/bin/citrea/tests/evm/mod.rs index bad5b251cd..309ee4905e 100644 --- a/bin/citrea/tests/evm/mod.rs +++ b/bin/citrea/tests/evm/mod.rs @@ -3,10 +3,10 @@ use std::str::FromStr; use alloy::signers::local::PrivateKeySigner; use alloy::signers::Signer; +use citrea_common::SequencerConfig; // use citrea::initialize_logging; use citrea_evm::smart_contracts::{LogsContract, SimpleStorageContract, TestContract}; use citrea_evm::system_contracts::BitcoinLightClient; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, U256}; use sov_rollup_interface::CITREA_VERSION; diff --git a/bin/citrea/tests/evm/subscription.rs b/bin/citrea/tests/evm/subscription.rs index 61724285e3..2a0b48e2b0 100644 --- a/bin/citrea/tests/evm/subscription.rs +++ b/bin/citrea/tests/evm/subscription.rs @@ -4,9 +4,9 @@ use std::time::Duration; use alloy_primitives::FixedBytes; use alloy_sol_types::SolEvent; +use citrea_common::SequencerConfig; use citrea_evm::smart_contracts::{AnotherLogEvent, LogEvent, LogsContract, TestContract}; use citrea_evm::{Filter, LogResponse}; -use citrea_sequencer::SequencerConfig; // use citrea::initialize_logging; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{keccak256, Address}; diff --git a/bin/citrea/tests/evm/tracing.rs b/bin/citrea/tests/evm/tracing.rs index dd09fb4583..de08e14118 100644 --- a/bin/citrea/tests/evm/tracing.rs +++ b/bin/citrea/tests/evm/tracing.rs @@ -1,8 +1,8 @@ use std::str::FromStr; +use citrea_common::SequencerConfig; // use citrea::initialize_logging; use citrea_evm::smart_contracts::{CallerContract, SimpleStorageContract}; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag}; use 
reth_rpc_types::trace::geth::GethTrace::{self, CallTracer, FourByteTracer}; diff --git a/bin/citrea/tests/mempool/mod.rs b/bin/citrea/tests/mempool/mod.rs index 9e046b1905..0fe91211bb 100644 --- a/bin/citrea/tests/mempool/mod.rs +++ b/bin/citrea/tests/mempool/mod.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use alloy::signers::local::PrivateKeySigner; use alloy::signers::Signer; -use citrea_sequencer::SequencerConfig; +use citrea_common::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use reth_primitives::{Address, BlockNumberOrTag}; use tokio::task::JoinHandle; diff --git a/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs b/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs index dc13c71c02..99827bf2ce 100644 --- a/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs +++ b/bin/citrea/tests/soft_confirmation_rule_enforcer/mod.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use citrea_sequencer::SequencerConfig; +use citrea_common::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use sov_mock_da::{MockAddress, MockDaService}; use tokio::time::sleep; diff --git a/bin/citrea/tests/test_helpers/mod.rs b/bin/citrea/tests/test_helpers/mod.rs index 232f8f1635..ee2fa729c0 100644 --- a/bin/citrea/tests/test_helpers/mod.rs +++ b/bin/citrea/tests/test_helpers/mod.rs @@ -5,8 +5,11 @@ use std::time::{Duration, SystemTime}; use anyhow::bail; use borsh::BorshDeserialize; use citrea::{CitreaRollupBlueprint, MockDemoRollup}; +use citrea_common::{ + FullNodeConfig, ProverConfig, RollupPublicKeys, RpcConfig, RunnerConfig, SequencerConfig, + StorageConfig, +}; use citrea_primitives::TEST_PRIVATE_KEY; -use citrea_sequencer::SequencerConfig; use citrea_stf::genesis_config::GenesisPaths; use sov_mock_da::{MockAddress, MockBlock, MockDaConfig, MockDaService}; use sov_modules_api::default_signature::private_key::DefaultPrivateKey; @@ -14,9 +17,6 @@ use sov_modules_api::PrivateKey; use sov_rollup_interface::da::{BlobReaderTrait, DaData, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, SlotData}; use sov_rollup_interface::zk::Proof; -use sov_stf_runner::{ - FullNodeConfig, ProverConfig, RollupPublicKeys, RpcConfig, RunnerConfig, StorageConfig, -}; use tempfile::TempDir; use tokio::sync::oneshot; use tokio::time::sleep; diff --git a/build-push/nightly/Dockerfile b/build-push/nightly/Dockerfile new file mode 100644 index 0000000000..c9ebc11357 --- /dev/null +++ b/build-push/nightly/Dockerfile @@ -0,0 +1,12 @@ +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y --no-install-recommends ca-certificates && \ + update-ca-certificates && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app + +COPY citrea /app/citrea + +ENTRYPOINT ["./citrea"] \ No newline at end of file diff --git a/crates/bitcoin-da/Cargo.toml b/crates/bitcoin-da/Cargo.toml index 1a6f25ac1b..ab4504c01f 100644 --- a/crates/bitcoin-da/Cargo.toml +++ b/crates/bitcoin-da/Cargo.toml @@ -23,6 +23,7 @@ borsh = { workspace = true } hex = { workspace = true, features = ["serde"] } pin-project = { workspace = true, optional = true, features = [] } rand = { workspace = true } +reqwest = { workspace = true, optional = true } serde = { workspace = true } serde_json = { workspace = true, features = ["raw_value"] } thiserror = { workspace = true } @@ -46,4 +47,5 @@ native = [ "sov-rollup-interface/native", "dep:citrea-primitives", "dep:bitcoincore-rpc", + "dep:reqwest", ] diff --git a/crates/bitcoin-da/src/service.rs b/crates/bitcoin-da/src/service.rs index 0ed159ec65..09224a0438 100644 --- 
a/crates/bitcoin-da/src/service.rs +++ b/crates/bitcoin-da/src/service.rs @@ -56,6 +56,9 @@ use crate::REVEAL_OUTPUT_AMOUNT; pub const FINALITY_DEPTH: u64 = 8; // blocks const POLLING_INTERVAL: u64 = 10; // seconds +const MEMPOOL_SPACE_URL: &str = "https://mempool.space/"; +const MEMPOOL_SPACE_RECOMMENDED_FEE_ENDPOINT: &str = "api/v1/fees/recommended"; + #[derive(PartialEq, Eq, PartialOrd, Ord, core::hash::Hash)] pub struct TxidWrapper(Txid); impl From for [u8; 32] { @@ -544,8 +547,13 @@ impl BitcoinService { #[instrument(level = "trace", skip_all, ret)] pub async fn get_fee_rate_as_sat_vb(&self) -> Result { - let smart_fee = self.client.estimate_smart_fee(1, None).await?; - let sat_vkb = smart_fee.fee_rate.map_or(1000, |rate| rate.to_sat()); + // If network is regtest or signet, mempool space is not available + let smart_fee = get_fee_rate_from_mempool_space(self.network).await?.or(self + .client + .estimate_smart_fee(1, None) + .await? + .fee_rate); + let sat_vkb = smart_fee.map_or(1000, |rate| rate.to_sat()); tracing::debug!("Fee rate: {} sat/vb", sat_vkb / 1000); Ok(sat_vkb / 1000) @@ -980,6 +988,32 @@ fn calculate_witness_root(txdata: &[TransactionWrapper]) -> [u8; 32] { BitcoinMerkleTree::new(hashes).root() } +pub(crate) async fn get_fee_rate_from_mempool_space( + network: bitcoin::Network, +) -> Result> { + let url = match network { + bitcoin::Network::Bitcoin => format!( + // Mainnet + "{}{}", + MEMPOOL_SPACE_URL, MEMPOOL_SPACE_RECOMMENDED_FEE_ENDPOINT + ), + bitcoin::Network::Testnet => format!( + "{}testnet4/{}", + MEMPOOL_SPACE_URL, MEMPOOL_SPACE_RECOMMENDED_FEE_ENDPOINT + ), + _ => return Ok(None), + }; + let fee_rate = reqwest::get(url) + .await? + .json::() + .await? + .get("fastestFee") + .and_then(|fee| fee.as_u64()) + .map(|fee| Amount::from_sat(fee * 1000)); // multiply by 1000 to convert to sat/vkb + + Ok(fee_rate) +} + #[cfg(test)] mod tests { use core::str::FromStr; @@ -995,7 +1029,7 @@ mod tests { use sov_rollup_interface::da::{DaVerifier, SequencerCommitment}; use sov_rollup_interface::services::da::{DaService, SlotData}; - use super::BitcoinService; + use super::{get_fee_rate_from_mempool_space, BitcoinService}; use crate::helpers::parsers::parse_hex_transaction; use crate::helpers::test_utils::{get_mock_data, get_mock_txs}; use crate::service::BitcoinServiceConfig; @@ -1453,4 +1487,24 @@ mod tests { "Publickey recovered incorrectly!" 
); } + + #[tokio::test] + async fn test_mempool_space_fee_rate() { + let _fee_rate = get_fee_rate_from_mempool_space(bitcoin::Network::Bitcoin) + .await + .unwrap() + .unwrap(); + let _fee_rate = get_fee_rate_from_mempool_space(bitcoin::Network::Testnet) + .await + .unwrap() + .unwrap(); + assert!(get_fee_rate_from_mempool_space(bitcoin::Network::Regtest) + .await + .unwrap() + .is_none()); + assert!(get_fee_rate_from_mempool_space(bitcoin::Network::Signet) + .await + .unwrap() + .is_none()); + } } diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index d90fd06fae..80dc92fbe3 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -14,18 +14,28 @@ resolver = "2" # 3rd-party deps anyhow = { workspace = true } backoff = { workspace = true } +borsh = { workspace = true } futures = { workspace = true } +hex = { workspace = true } hyper = { workspace = true } jsonrpsee = { workspace = true, features = ["http-client", "server"] } lru = { workspace = true } +serde = { workspace = true } tokio = { workspace = true } tokio-util = { workspace = true } +toml = { workspace = true } tower-http = { workspace = true } tracing = { workspace = true } # Sov SDK deps sov-db = { path = "../sovereign-sdk/full-node/db/sov-db" } sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface" } +sov-stf-runner = { path = "../sovereign-sdk/full-node/sov-stf-runner" } # Citrea citrea-primitives = { path = "../primitives/" } +citrea-pruning = { path = "../pruning" } + +[dev-dependencies] +sov-mock-da = { path = "../sovereign-sdk/adapters/mock-da", features = ["native"] } +tempfile = { workspace = true } diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs b/crates/common/src/config.rs similarity index 66% rename from crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs rename to crates/common/src/config.rs index c5cbac51bb..3726d73109 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/src/config.rs +++ b/crates/common/src/config.rs @@ -5,9 +5,7 @@ use std::path::{Path, PathBuf}; use citrea_pruning::PruningConfig; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; - -use crate::ProverGuestRunConfig; - +use sov_stf_runner::ProverGuestRunConfig; /// Runner configuration. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct RunnerConfig { @@ -163,6 +161,74 @@ pub fn from_toml_path, R: DeserializeOwned>(path: P) -> anyhow::R Ok(result) } +/// Rollup Configuration +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct SequencerConfig { + /// Private key of the sequencer + pub private_key: String, + /// Min. 
soft confirmaitons for sequencer to commit + pub min_soft_confirmations_per_commitment: u64, + /// Whether or not the sequencer is running in test mode + pub test_mode: bool, + /// Limit for the number of deposit transactions to be included in the block + pub deposit_mempool_fetch_limit: usize, + /// Sequencer specific mempool config + pub mempool_conf: SequencerMempoolConfig, + /// DA layer update loop interval in ms + pub da_update_interval_ms: u64, + /// Block production interval in ms + pub block_production_interval_ms: u64, +} + +impl Default for SequencerConfig { + fn default() -> Self { + SequencerConfig { + private_key: "1212121212121212121212121212121212121212121212121212121212121212" + .to_string(), + min_soft_confirmations_per_commitment: 4, + test_mode: true, + deposit_mempool_fetch_limit: 10, + block_production_interval_ms: 100, + da_update_interval_ms: 100, + mempool_conf: Default::default(), + } + } +} + +/// Mempool Config for the sequencer +/// Read: https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design +#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] +pub struct SequencerMempoolConfig { + /// Max number of transactions in the pending sub-pool + pub pending_tx_limit: u64, + /// Max megabytes of transactions in the pending sub-pool + pub pending_tx_size: u64, + /// Max number of transactions in the queued sub-pool + pub queue_tx_limit: u64, + /// Max megabytes of transactions in the queued sub-pool + pub queue_tx_size: u64, + /// Max number of transactions in the base-fee sub-pool + pub base_fee_tx_limit: u64, + /// Max megabytes of transactions in the base-fee sub-pool + pub base_fee_tx_size: u64, + /// Max number of executable transaction slots guaranteed per account + pub max_account_slots: u64, +} + +impl Default for SequencerMempoolConfig { + fn default() -> Self { + Self { + pending_tx_limit: 100000, + pending_tx_size: 200, + queue_tx_limit: 100000, + queue_tx_size: 200, + base_fee_tx_limit: 100000, + base_fee_tx_size: 200, + max_account_slots: 16, + } + } +} + #[cfg(test)] mod tests { use std::io::Write; @@ -264,4 +330,47 @@ mod tests { }; assert_eq!(config, expected); } + #[test] + fn test_correct_config_sequencer() { + let config = r#" + private_key = "1212121212121212121212121212121212121212121212121212121212121212" + min_soft_confirmations_per_commitment = 123 + test_mode = false + deposit_mempool_fetch_limit = 10 + da_update_interval_ms = 1000 + block_production_interval_ms = 1000 + [mempool_conf] + pending_tx_limit = 100000 + pending_tx_size = 200 + queue_tx_limit = 100000 + queue_tx_size = 200 + base_fee_tx_limit = 100000 + base_fee_tx_size = 200 + max_account_slots = 16 + "#; + + let config_file = create_config_from(config); + + let config: SequencerConfig = from_toml_path(config_file.path()).unwrap(); + + let expected = SequencerConfig { + private_key: "1212121212121212121212121212121212121212121212121212121212121212" + .to_string(), + min_soft_confirmations_per_commitment: 123, + test_mode: false, + deposit_mempool_fetch_limit: 10, + mempool_conf: SequencerMempoolConfig { + pending_tx_limit: 100000, + pending_tx_size: 200, + queue_tx_limit: 100000, + queue_tx_size: 200, + base_fee_tx_limit: 100000, + base_fee_tx_size: 200, + max_account_slots: 16, + }, + da_update_interval_ms: 1000, + block_production_interval_ms: 1000, + }; + assert_eq!(config, expected); + } } diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index 9b1396d12c..395ad9ab93 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -2,8 
+2,10 @@ #![forbid(unsafe_code)] pub mod cache; +pub mod config; pub mod da; pub mod error; pub mod rpc; pub mod tasks; pub mod utils; +pub use config::*; diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 6da20c216a..1ba578959e 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -1,9 +1,11 @@ use std::collections::{HashMap, HashSet}; +use borsh::de::BorshDeserialize; use sov_db::ledger_db::SharedLedgerOps; use sov_db::schema::types::BatchNumber; -use sov_rollup_interface::da::SequencerCommitment; +use sov_rollup_interface::da::{BlobReaderTrait, DaDataBatchProof, DaSpec, SequencerCommitment}; use sov_rollup_interface::rpc::SoftConfirmationStatus; +use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::stf::StateDiff; pub fn merge_state_diffs(old_diff: StateDiff, new_diff: StateDiff) -> StateDiff { @@ -13,18 +15,6 @@ pub fn merge_state_diffs(old_diff: StateDiff, new_diff: StateDiff) -> StateDiff new_diff_map.into_iter().collect() } -/// Remove finalized commitments using the end block number of the L2 range. -/// This is basically filtering out finalized soft confirmations. -pub fn filter_out_finalized_commitments( - ledger_db: &DB, - sequencer_commitments: &[SequencerCommitment], -) -> anyhow::Result<(Vec, Vec)> { - filter_out_commitments_by_status( - ledger_db, - sequencer_commitments, - SoftConfirmationStatus::Finalized, - ) -} /// Remove proven commitments using the end block number of the L2 range. /// This is basically filtering out proven soft confirmations. pub fn filter_out_proven_commitments( @@ -38,7 +28,7 @@ pub fn filter_out_proven_commitments( ) } -pub fn filter_out_commitments_by_status( +fn filter_out_commitments_by_status( ledger_db: &DB, sequencer_commitments: &[SequencerCommitment], exclude_status: SoftConfirmationStatus, @@ -76,7 +66,7 @@ pub fn filter_out_commitments_by_status( } pub fn check_l2_range_exists( - ledger_db: DB, + ledger_db: &DB, first_l2_height_of_l1: u64, last_l2_height_of_l1: u64, ) -> bool { @@ -89,3 +79,24 @@ pub fn check_l2_range_exists( } false } + +pub fn extract_sequencer_commitments( + sequencer_da_pub_key: &[u8], + da_data: &mut [<::Spec as DaSpec>::BlobTransaction], +) -> Vec { + let mut sequencer_commitments = vec![]; + // if we don't do this, the zk circuit can't read the sequencer commitments + da_data.iter_mut().for_each(|blob| { + blob.full_data(); + }); + da_data.iter_mut().for_each(|tx| { + let data = DaDataBatchProof::try_from_slice(tx.full_data()); + // Check for commitment + if tx.sender().as_ref() == sequencer_da_pub_key { + if let Ok(DaDataBatchProof::SequencerCommitment(seq_com)) = data { + sequencer_commitments.push(seq_com); + } + } + }); + sequencer_commitments +} diff --git a/crates/fullnode/src/da_block_handler.rs b/crates/fullnode/src/da_block_handler.rs index a1df6d9f6b..ec4726dfff 100644 --- a/crates/fullnode/src/da_block_handler.rs +++ b/crates/fullnode/src/da_block_handler.rs @@ -160,7 +160,7 @@ where // If the L2 range does not exist, we break off the current process call // We retry the L1 block at a later tick. 
if !check_l2_range_exists( - self.ledger_db.clone(), + &self.ledger_db, sequencer_commitments[0].l2_start_block_number, sequencer_commitments[sequencer_commitments.len() - 1].l2_end_block_number, ) { diff --git a/crates/fullnode/src/runner.rs b/crates/fullnode/src/runner.rs index 07dbb45ebe..93269bfbc6 100644 --- a/crates/fullnode/src/runner.rs +++ b/crates/fullnode/src/runner.rs @@ -8,6 +8,7 @@ use backoff::ExponentialBackoffBuilder; use citrea_common::cache::L1BlockCache; use citrea_common::da::get_da_block_at_height; use citrea_common::tasks::manager::TaskManager; +use citrea_common::{RollupPublicKeys, RpcConfig, RunnerConfig}; use citrea_primitives::types::SoftConfirmationHash; use citrea_pruning::{Pruner, PruningConfig}; use jsonrpsee::core::client::Error as JsonrpseeError; @@ -26,7 +27,7 @@ pub use sov_rollup_interface::stf::BatchReceipt; use sov_rollup_interface::stf::StateTransitionFunction; use sov_rollup_interface::storage::HierarchicalStorageManager; use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; -use sov_stf_runner::{InitVariant, RollupPublicKeys, RpcConfig, RunnerConfig}; +use sov_stf_runner::InitVariant; use tokio::sync::{broadcast, mpsc, oneshot, Mutex}; use tokio::time::{sleep, Duration}; use tokio::{select, signal}; diff --git a/crates/fullnode/tests/runner_initialization_tests.rs b/crates/fullnode/tests/runner_initialization_tests.rs index a5614bdbef..3c30b90e05 100644 --- a/crates/fullnode/tests/runner_initialization_tests.rs +++ b/crates/fullnode/tests/runner_initialization_tests.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::sync::Arc; +use citrea_common::{FullNodeConfig, RollupPublicKeys, RpcConfig, RunnerConfig, StorageConfig}; use citrea_fullnode::CitreaFullnode; use sov_db::ledger_db::LedgerDB; use sov_db::rocks_db_config::RocksdbConfig; @@ -10,10 +11,7 @@ use sov_prover_storage_manager::ProverStorageManager; use sov_rollup_interface::fork::{Fork, ForkManager}; use sov_rollup_interface::spec::SpecId; use sov_state::DefaultStorageSpec; -use sov_stf_runner::{ - FullNodeConfig, InitVariant, RollupPublicKeys, RpcConfig, RunnerConfig, StorageConfig, -}; - +use sov_stf_runner::InitVariant; mod hash_stf; use hash_stf::HashStf; diff --git a/crates/prover/Cargo.toml b/crates/prover/Cargo.toml index 02bb3e34f7..39bbe5b5b7 100644 --- a/crates/prover/Cargo.toml +++ b/crates/prover/Cargo.toml @@ -19,6 +19,7 @@ sequencer-client = { path = "../sequencer-client" } # Sov SDK deps sov-db = { path = "../sovereign-sdk/full-node/db/sov-db" } sov-modules-api = { path = "../sovereign-sdk/module-system/sov-modules-api", default-features = false } +sov-modules-core = { path = "../sovereign-sdk/module-system/sov-modules-core" } sov-modules-rollup-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-rollup-blueprint" } sov-modules-stf-blueprint = { path = "../sovereign-sdk/module-system/sov-modules-stf-blueprint", features = ["native"] } sov-rollup-interface = { path = "../sovereign-sdk/rollup-interface" } @@ -31,7 +32,7 @@ backoff = { workspace = true } borsh = { workspace = true } futures = { workspace = true } hex = { workspace = true } -jsonrpsee = { workspace = true } +jsonrpsee = { workspace = true, features = ["http-client", "server", "client"] } num_cpus = { workspace = true } parking_lot = { workspace = true } rand = { workspace = true } diff --git a/crates/prover/src/da_block_handler.rs b/crates/prover/src/da_block_handler.rs index 1bdaf7b4e7..b128b9ec8a 100644 --- a/crates/prover/src/da_block_handler.rs +++ b/crates/prover/src/da_block_handler.rs @@ -10,21 
+10,24 @@ use borsh::{BorshDeserialize, BorshSerialize}; use citrea_common::cache::L1BlockCache; use citrea_common::da::get_da_block_at_height; use citrea_common::utils::{ - check_l2_range_exists, filter_out_proven_commitments, merge_state_diffs, + check_l2_range_exists, extract_sequencer_commitments, filter_out_proven_commitments, + merge_state_diffs, }; +use citrea_common::ProverConfig; use citrea_primitives::MAX_TXBODY_SIZE; use rand::Rng; use serde::de::DeserializeOwned; use serde::Serialize; use sov_db::ledger_db::ProverLedgerOps; use sov_db::schema::types::{BatchNumber, SlotNumber, StoredProof, StoredStateTransition}; -use sov_modules_api::{BlobReaderTrait, DaSpec, SignedSoftConfirmation, StateDiff, Zkvm}; -use sov_rollup_interface::da::{BlockHeaderTrait, DaDataBatchProof, SequencerCommitment}; +use sov_modules_api::{BlobReaderTrait, DaSpec, StateDiff, Zkvm}; +use sov_rollup_interface::da::{BlockHeaderTrait, SequencerCommitment}; use sov_rollup_interface::rpc::SoftConfirmationStatus; use sov_rollup_interface::services::da::{DaService, SlotData}; +use sov_rollup_interface::soft_confirmation::SignedSoftConfirmation; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::zk::{Proof, StateTransitionData, ZkvmHost}; -use sov_stf_runner::{ProverConfig, ProverService}; +use sov_stf_runner::ProverService; use tokio::select; use tokio::sync::{mpsc, Mutex}; use tokio::time::{sleep, Duration}; @@ -176,7 +179,10 @@ where blob.full_data(); }); let mut sequencer_commitments: Vec = - self.extract_sequencer_commitments(l1_block.header().hash().into(), &mut da_data); + extract_sequencer_commitments::( + self.sequencer_da_pub_key.as_slice(), + &mut da_data, + ); if sequencer_commitments.is_empty() { info!("No sequencer commitment found at height {}", l1_height,); @@ -207,7 +213,7 @@ where // the outer loop / select to make room for other tasks to run. // We retry the L1 block there as well. 
if !check_l2_range_exists( - self.ledger_db.clone(), + &self.ledger_db, sequencer_commitments[0].l2_start_block_number, sequencer_commitments[sequencer_commitments.len() - 1].l2_end_block_number, ) { @@ -246,8 +252,10 @@ where let hash = da_block_header_of_commitments.hash(); if should_prove { - let sequencer_commitments_groups = - self.break_sequencer_commitments_into_groups(&sequencer_commitments)?; + let sequencer_commitments_groups = break_sequencer_commitments_into_groups( + &self.ledger_db, + &sequencer_commitments, + )?; let submitted_proofs = self .ledger_db @@ -296,87 +304,6 @@ where Ok(()) } - fn extract_sequencer_commitments( - &self, - l1_block_hash: [u8; 32], - da_data: &mut [<::Spec as DaSpec>::BlobTransaction], - ) -> Vec { - let mut sequencer_commitments = vec![]; - // if we don't do this, the zk circuit can't read the sequencer commitments - da_data.iter_mut().for_each(|blob| { - blob.full_data(); - }); - da_data.iter_mut().for_each(|tx| { - let data = DaDataBatchProof::try_from_slice(tx.full_data()); - // Check for commitment - if tx.sender().as_ref() == self.sequencer_da_pub_key.as_slice() { - if let Ok(DaDataBatchProof::SequencerCommitment(seq_com)) = data { - sequencer_commitments.push(seq_com); - } else { - tracing::warn!( - "Found broken DA data in block 0x{}: {:?}", - hex::encode(l1_block_hash), - data - ); - } - } - }); - sequencer_commitments - } - - fn break_sequencer_commitments_into_groups( - &self, - sequencer_commitments: &[SequencerCommitment], - ) -> anyhow::Result>> { - let mut result_range = vec![]; - - let mut range = 0usize..=0usize; - let mut cumulative_state_diff = StateDiff::new(); - for (index, sequencer_commitment) in sequencer_commitments.iter().enumerate() { - let mut sequencer_commitment_state_diff = StateDiff::new(); - for l2_height in sequencer_commitment.l2_start_block_number - ..=sequencer_commitment.l2_end_block_number - { - let state_diff = self - .ledger_db - .get_l2_state_diff(BatchNumber(l2_height))? - .ok_or(anyhow!( - "Could not find state diff for L2 range {}-{}", - sequencer_commitment.l2_start_block_number, - sequencer_commitment.l2_end_block_number - ))?; - sequencer_commitment_state_diff = - merge_state_diffs(sequencer_commitment_state_diff, state_diff); - } - cumulative_state_diff = merge_state_diffs( - cumulative_state_diff, - sequencer_commitment_state_diff.clone(), - ); - - let compressed_state_diff = compress_blob(&borsh::to_vec(&cumulative_state_diff)?); - - // Threshold is checked by comparing compressed state diff size as the data will be compressed before it is written on DA - let state_diff_threshold_reached = compressed_state_diff.len() > MAX_TXBODY_SIZE; - - if state_diff_threshold_reached { - // We've exceeded the limit with the current commitments - // so we have to stop at the previous one. 
- result_range.push(range); - - // Reset the cumulative state diff to be equal to the current commitment state diff - cumulative_state_diff = sequencer_commitment_state_diff; - range = index..=index; - } else { - range = *range.start()..=index; - } - } - - // If the last group hasn't been reset because it has not reached the threshold, - // Add it anyway - result_range.push(range); - Ok(result_range) - } - async fn create_state_transition_data( &self, sequencer_commitments: &[SequencerCommitment], @@ -394,12 +321,13 @@ where state_transition_witnesses, soft_confirmations, da_block_headers_of_soft_confirmations, - ) = self - .get_state_transition_data_from_commitments( - &sequencer_commitments[sequencer_commitments_range.clone()], - &self.da_service, - ) - .await?; + ) = get_state_transition_data_from_commitments( + &sequencer_commitments[sequencer_commitments_range.clone()], + &self.da_service, + &self.ledger_db, + &self.l1_block_cache, + ) + .await?; let initial_state_root = self .ledger_db .get_l2_state_root::(first_l2_height_of_l1 - 1)? @@ -446,87 +374,6 @@ where Ok(transition_data) } - async fn get_state_transition_data_from_commitments( - &self, - sequencer_commitments: &[SequencerCommitment], - da_service: &Arc, - ) -> Result, anyhow::Error> { - let mut state_transition_witnesses: VecDeque> = VecDeque::new(); - let mut soft_confirmations: VecDeque> = VecDeque::new(); - let mut da_block_headers_of_soft_confirmations: VecDeque< - Vec<<::Spec as DaSpec>::BlockHeader>, - > = VecDeque::new(); - for sequencer_commitment in sequencer_commitments.to_owned().iter() { - // get the l2 height ranges of each seq_commitments - let mut witnesses = vec![]; - let start_l2 = sequencer_commitment.l2_start_block_number; - let end_l2 = sequencer_commitment.l2_end_block_number; - let soft_confirmations_in_commitment = match self - .ledger_db - .get_soft_confirmation_range(&(BatchNumber(start_l2)..=BatchNumber(end_l2))) - { - Ok(soft_confirmations) => soft_confirmations, - Err(e) => { - return Err(anyhow!( - "Failed to get soft confirmations from the ledger db: {}", - e - )); - } - }; - let mut commitment_soft_confirmations = vec![]; - let mut da_block_headers_to_push: Vec< - <::Spec as DaSpec>::BlockHeader, - > = vec![]; - for soft_confirmation in soft_confirmations_in_commitment { - if da_block_headers_to_push.is_empty() - || da_block_headers_to_push.last().unwrap().height() - != soft_confirmation.da_slot_height - { - let filtered_block = match get_da_block_at_height( - da_service, - soft_confirmation.da_slot_height, - self.l1_block_cache.clone(), - ) - .await - { - Ok(block) => block, - Err(_) => { - return Err(anyhow!( - "Error while fetching DA block at height: {}", - soft_confirmation.da_slot_height - )); - } - }; - da_block_headers_to_push.push(filtered_block.header().clone()); - } - let signed_soft_confirmation: SignedSoftConfirmation = - soft_confirmation.clone().into(); - commitment_soft_confirmations.push(signed_soft_confirmation.clone()); - } - soft_confirmations.push_back(commitment_soft_confirmations); - - da_block_headers_of_soft_confirmations.push_back(da_block_headers_to_push); - for l2_height in sequencer_commitment.l2_start_block_number - ..=sequencer_commitment.l2_end_block_number - { - let witness = match self.ledger_db.get_l2_witness::(l2_height) { - Ok(witness) => witness, - Err(e) => { - return Err(anyhow!("Failed to get witness from the ledger db: {}", e)) - } - }; - - witnesses.push(witness.expect("A witness must be present")); - } - 
state_transition_witnesses.push_back(witnesses); - } - Ok(( - state_transition_witnesses, - soft_confirmations, - da_block_headers_of_soft_confirmations, - )) - } - async fn prove_state_transition( &self, transition_data: StateTransitionData, @@ -730,3 +577,137 @@ async fn sync_l1( sleep(Duration::from_secs(2)).await; } } + +pub(crate) async fn get_state_transition_data_from_commitments< + Da: DaService, + DB: ProverLedgerOps, + Witness: DeserializeOwned, +>( + sequencer_commitments: &[SequencerCommitment], + da_service: &Arc, + ledger_db: &DB, + l1_block_cache: &Arc>>, +) -> Result, anyhow::Error> { + let mut state_transition_witnesses: VecDeque> = VecDeque::new(); + let mut soft_confirmations: VecDeque> = VecDeque::new(); + let mut da_block_headers_of_soft_confirmations: VecDeque< + Vec<<::Spec as DaSpec>::BlockHeader>, + > = VecDeque::new(); + for sequencer_commitment in sequencer_commitments.to_owned().iter() { + // get the l2 height ranges of each seq_commitments + let mut witnesses = vec![]; + let start_l2 = sequencer_commitment.l2_start_block_number; + let end_l2 = sequencer_commitment.l2_end_block_number; + let soft_confirmations_in_commitment = match ledger_db + .get_soft_confirmation_range(&(BatchNumber(start_l2)..=BatchNumber(end_l2))) + { + Ok(soft_confirmations) => soft_confirmations, + Err(e) => { + return Err(anyhow!( + "Failed to get soft confirmations from the ledger db: {}", + e + )); + } + }; + let mut commitment_soft_confirmations = vec![]; + let mut da_block_headers_to_push: Vec<<::Spec as DaSpec>::BlockHeader> = + vec![]; + for soft_confirmation in soft_confirmations_in_commitment { + if da_block_headers_to_push.is_empty() + || da_block_headers_to_push.last().unwrap().height() + != soft_confirmation.da_slot_height + { + let filtered_block = match get_da_block_at_height( + da_service, + soft_confirmation.da_slot_height, + l1_block_cache.clone(), + ) + .await + { + Ok(block) => block, + Err(_) => { + return Err(anyhow!( + "Error while fetching DA block at height: {}", + soft_confirmation.da_slot_height + )); + } + }; + da_block_headers_to_push.push(filtered_block.header().clone()); + } + let signed_soft_confirmation: SignedSoftConfirmation = soft_confirmation.clone().into(); + commitment_soft_confirmations.push(signed_soft_confirmation.clone()); + } + soft_confirmations.push_back(commitment_soft_confirmations); + + da_block_headers_of_soft_confirmations.push_back(da_block_headers_to_push); + for l2_height in + sequencer_commitment.l2_start_block_number..=sequencer_commitment.l2_end_block_number + { + let witness = match ledger_db.get_l2_witness::(l2_height) { + Ok(witness) => witness, + Err(e) => return Err(anyhow!("Failed to get witness from the ledger db: {}", e)), + }; + + witnesses.push(witness.expect("A witness must be present")); + } + state_transition_witnesses.push_back(witnesses); + } + Ok(( + state_transition_witnesses, + soft_confirmations, + da_block_headers_of_soft_confirmations, + )) +} + +pub(crate) fn break_sequencer_commitments_into_groups( + ledger_db: &DB, + sequencer_commitments: &[SequencerCommitment], +) -> anyhow::Result>> { + let mut result_range = vec![]; + + let mut range = 0usize..=0usize; + let mut cumulative_state_diff = StateDiff::new(); + for (index, sequencer_commitment) in sequencer_commitments.iter().enumerate() { + let mut sequencer_commitment_state_diff = StateDiff::new(); + for l2_height in + sequencer_commitment.l2_start_block_number..=sequencer_commitment.l2_end_block_number + { + let state_diff = + ledger_db + 
.get_l2_state_diff(BatchNumber(l2_height))? + .ok_or(anyhow!( + "Could not find state diff for L2 range {}-{}", + sequencer_commitment.l2_start_block_number, + sequencer_commitment.l2_end_block_number + ))?; + sequencer_commitment_state_diff = + merge_state_diffs(sequencer_commitment_state_diff, state_diff); + } + cumulative_state_diff = merge_state_diffs( + cumulative_state_diff, + sequencer_commitment_state_diff.clone(), + ); + + let compressed_state_diff = compress_blob(&borsh::to_vec(&cumulative_state_diff)?); + + // Threshold is checked by comparing compressed state diff size as the data will be compressed before it is written on DA + let state_diff_threshold_reached = compressed_state_diff.len() > MAX_TXBODY_SIZE; + + if state_diff_threshold_reached { + // We've exceeded the limit with the current commitments + // so we have to stop at the previous one. + result_range.push(range); + + // Reset the cumulative state diff to be equal to the current commitment state diff + cumulative_state_diff = sequencer_commitment_state_diff; + range = index..=index; + } else { + range = *range.start()..=index; + } + } + + // If the last group hasn't been reset because it has not reached the threshold, + // Add it anyway + result_range.push(range); + Ok(result_range) +} diff --git a/crates/prover/src/lib.rs b/crates/prover/src/lib.rs index aac2e9e5db..0bc2caca0f 100644 --- a/crates/prover/src/lib.rs +++ b/crates/prover/src/lib.rs @@ -10,6 +10,7 @@ mod da_block_handler; pub mod prover_service; mod runner; pub use runner::*; +mod rpc; /// Dependencies needed to run the rollup. pub struct Prover { @@ -37,7 +38,7 @@ impl Prover { /// Only run the rpc. pub async fn run_rpc(mut self) -> Result<(), anyhow::Error> { - self.runner.start_rpc_server(self.rpc_methods, None).await; + self.runner.start_rpc_server(self.rpc_methods, None).await?; Ok(()) } @@ -47,7 +48,7 @@ impl Prover { channel: Option>, ) -> Result<(), anyhow::Error> { let mut runner = self.runner; - runner.start_rpc_server(self.rpc_methods, channel).await; + runner.start_rpc_server(self.rpc_methods, channel).await?; runner.run().await?; Ok(()) diff --git a/crates/prover/src/prover_service/parallel/mod.rs b/crates/prover/src/prover_service/parallel/mod.rs index 1c7d79205a..8b61796b98 100644 --- a/crates/prover/src/prover_service/parallel/mod.rs +++ b/crates/prover/src/prover_service/parallel/mod.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use async_trait::async_trait; use borsh::{BorshDeserialize, BorshSerialize}; +use citrea_common::config::ProverConfig; use citrea_stf::verifier::StateTransitionVerifier; use parking_lot::Mutex; use prover::Prover; @@ -13,7 +14,6 @@ use sov_rollup_interface::da::{DaData, DaSpec}; use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::stf::StateTransitionFunction; use sov_rollup_interface::zk::{Proof, StateTransitionData, ZkvmHost}; -use sov_stf_runner::config::ProverConfig; use sov_stf_runner::{ ProofProcessingStatus, ProverGuestRunConfig, ProverService, ProverServiceError, WitnessSubmissionStatus, diff --git a/crates/prover/src/rpc.rs b/crates/prover/src/rpc.rs new file mode 100644 index 0000000000..89f7662e37 --- /dev/null +++ b/crates/prover/src/rpc.rs @@ -0,0 +1,329 @@ +use std::marker::PhantomData; +use std::sync::Arc; + +use borsh::BorshSerialize; +use citrea_common::cache::L1BlockCache; +use citrea_common::utils::{ + check_l2_range_exists, extract_sequencer_commitments, filter_out_proven_commitments, +}; +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use 
jsonrpsee::types::error::{INTERNAL_ERROR_CODE, INTERNAL_ERROR_MSG}; +use jsonrpsee::types::ErrorObjectOwned; +use serde::{Deserialize, Serialize}; +use sov_db::ledger_db::ProverLedgerOps; +use sov_db::schema::types::BatchNumber; +use sov_modules_core::{Spec, Storage}; +use sov_rollup_interface::da::{DaSpec, SequencerCommitment}; +use sov_rollup_interface::services::da::{DaService, SlotData}; +use sov_rollup_interface::zk::StateTransitionData; +use tokio::sync::Mutex; +use tracing::{debug, error}; + +use crate::da_block_handler::{ + break_sequencer_commitments_into_groups, get_state_transition_data_from_commitments, +}; + +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct ProverInputResponse { + pub commitment_range: (u32, u32), + pub l1_block_height: u64, + pub encoded_serialized_state_transition_data: String, +} + +pub(crate) struct RpcContext +where + C: sov_modules_api::Context, + Da: DaService, + DB: ProverLedgerOps, +{ + pub da_service: Arc, + pub ledger: DB, + pub sequencer_da_pub_key: Vec, + pub sequencer_pub_key: Vec, + pub l1_block_cache: Arc>>, + pub phantom: PhantomData C>, +} + +#[rpc(client, server)] +pub trait ProverRpc { + /// Generate state transition data for the given L1 block height, and return the data as a borsh serialized hex string. + #[method(name = "prover_generateInput")] + async fn generate_input( + &self, + l1_height: u64, + group_commitments: Option, + ) -> RpcResult>; +} + +pub struct ProverRpcServerImpl +where + C: sov_modules_api::Context, + Da: DaService, + DB: ProverLedgerOps + Send + Sync + 'static, +{ + context: Arc>, +} + +impl ProverRpcServerImpl +where + C: sov_modules_api::Context, + Da: DaService, + DB: ProverLedgerOps + Send + Sync + 'static, +{ + pub fn new(context: RpcContext) -> Self { + Self { + context: Arc::new(context), + } + } +} + +#[async_trait::async_trait] +impl + ProverRpcServer for ProverRpcServerImpl +{ + async fn generate_input( + &self, + l1_height: u64, + group_commitments: Option, + ) -> RpcResult> { + debug!("Prover: prover_generateInput"); + + let l1_block: ::FilteredBlock = self + .context + .da_service + .get_block_at(l1_height) + .await + .map_err(|e| { + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })?; + + let mut da_data: Vec<<::Spec as DaSpec>::BlobTransaction> = + self.context.da_service.extract_relevant_blobs(&l1_block); + + let mut sequencer_commitments: Vec = extract_sequencer_commitments::( + self.context.sequencer_da_pub_key.as_slice(), + &mut da_data, + ); + + if sequencer_commitments.is_empty() { + return Err(ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!( + "No sequencer commitments found in block: {l1_height}", + )), + )); + } + + // Make sure all sequencer commitments are stored in ascending order. + // We sort before checking ranges to prevent substraction errors. + sequencer_commitments.sort(); + + // If the L2 range does not exist, we break off the local loop getting back to + // the outer loop / select to make room for other tasks to run. + // We retry the L1 block there as well. 
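+        // There is no retry loop at this point: if the commitment range is not
+        // yet synced into the ledger, the check below fails the request with an
+        // error and the caller is expected to retry this L1 height later.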
+ let start_block_number = sequencer_commitments[0].l2_start_block_number; + let end_block_number = + sequencer_commitments[sequencer_commitments.len() - 1].l2_end_block_number; + + // If range is not synced yet return error + if !check_l2_range_exists(&self.context.ledger, start_block_number, end_block_number) { + return Err(ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!( + "L2 Range of commitments is not synced yet: {start_block_number} - {end_block_number}" + )), + )); + } + + let (sequencer_commitments, preproven_commitments) = + filter_out_proven_commitments(&self.context.ledger, &sequencer_commitments).map_err( + |e| { + error!("Error filtering out proven commitments: {:?}", e); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + }, + )?; + + if sequencer_commitments.is_empty() { + return Err(ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!( + "All sequencer commitments are duplicates from a former DA block {}", + l1_height + )), + )); + } + + let da_block_header_of_commitments: <::Spec as DaSpec>::BlockHeader = + l1_block.header().clone(); + + let mut state_transition_responses = Vec::new(); + + let ranges = match group_commitments { + Some(true) => break_sequencer_commitments_into_groups( + &self.context.ledger, + &sequencer_commitments, + ) + .map_err(|e| { + error!("Error breaking sequencer commitments into groups: {:?}", e); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })?, + _ => vec![(0..=sequencer_commitments.len() - 1)], + }; + + for sequencer_commitments_range in ranges { + let first_l2_height_of_l1 = + sequencer_commitments[*sequencer_commitments_range.start()].l2_start_block_number; + let last_l2_height_of_l1 = + sequencer_commitments[*sequencer_commitments_range.end()].l2_end_block_number; + let ( + state_transition_witnesses, + soft_confirmations, + da_block_headers_of_soft_confirmations, + ) = get_state_transition_data_from_commitments( + &sequencer_commitments[sequencer_commitments_range.clone()], + &self.context.da_service, + &self.context.ledger, + &self.context.l1_block_cache, + ) + .await + .map_err(|e| { + error!( + "Error getting state transition data from commitments: {:?}", + e + ); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })?; + let initial_state_root = self + .context + .ledger + .get_l2_state_root::<<::Storage as Storage>::Root>( + first_l2_height_of_l1 - 1, + ) + .map_err(|e| { + error!("Error getting initial state root: {:?}", e); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })? + .expect("There should be a state root"); + let initial_batch_hash = self + .context + .ledger + .get_soft_confirmation_by_number(&BatchNumber(first_l2_height_of_l1)) + .map_err(|e| { + error!("Error getting initial batch hash: {:?}", e); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })? + .ok_or(ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!( + "Could not find soft batch at height {}", + first_l2_height_of_l1 + )), + ))? 
+ .prev_hash; + + let final_state_root = self + .context + .ledger + .get_l2_state_root::<<::Storage as Storage>::Root>(last_l2_height_of_l1) + .map_err(|e| { + error!("Error getting final state root: {:?}", e); + ErrorObjectOwned::owned( + INTERNAL_ERROR_CODE, + INTERNAL_ERROR_MSG, + Some(format!("{e}",)), + ) + })? + .expect("There should be a state root"); + + let (inclusion_proof, completeness_proof) = self + .context + .da_service + .get_extraction_proof(&l1_block, &da_data) + .await; + + let state_transition_data: StateTransitionData< + <::Storage as Storage>::Root, + <::Storage as Storage>::Witness, + Da::Spec, + > = StateTransitionData { + initial_state_root, + final_state_root, + initial_batch_hash, + da_data: da_data.clone(), + da_block_header_of_commitments: da_block_header_of_commitments.clone(), + inclusion_proof, + completeness_proof, + soft_confirmations, + state_transition_witnesses, + da_block_headers_of_soft_confirmations, + preproven_commitments: preproven_commitments.to_vec(), + sequencer_commitments_range: ( + *sequencer_commitments_range.start() as u32, + *sequencer_commitments_range.end() as u32, + ), + sequencer_public_key: self.context.sequencer_pub_key.clone(), + sequencer_da_public_key: self.context.sequencer_da_pub_key.clone(), + }; + let serialized_state_transition = serialize_state_transition(state_transition_data); + + let response = ProverInputResponse { + commitment_range: ( + *sequencer_commitments_range.start() as u32, + *sequencer_commitments_range.end() as u32, + ), + l1_block_height: l1_height, + encoded_serialized_state_transition_data: hex::encode(serialized_state_transition), + }; + + state_transition_responses.push(response); + } + + Ok(state_transition_responses) + } +} + +fn serialize_state_transition(item: T) -> Vec { + borsh::to_vec(&item).expect("Risc0 hint serialization is infallible") +} + +pub fn create_rpc_module( + rpc_context: RpcContext, +) -> jsonrpsee::RpcModule> +where + C: sov_modules_api::Context, + Da: DaService, + DB: ProverLedgerOps + Send + Sync + 'static, +{ + let server = ProverRpcServerImpl::new(rpc_context); + + ProverRpcServer::into_rpc(server) +} diff --git a/crates/prover/src/runner.rs b/crates/prover/src/runner.rs index 668828389f..1e5e2d5236 100644 --- a/crates/prover/src/runner.rs +++ b/crates/prover/src/runner.rs @@ -4,12 +4,13 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use anyhow::bail; +use anyhow::{anyhow, bail}; use backoff::exponential::ExponentialBackoffBuilder; use backoff::future::retry as retry_backoff; use citrea_common::cache::L1BlockCache; use citrea_common::da::get_da_block_at_height; use citrea_common::tasks::manager::TaskManager; +use citrea_common::{ProverConfig, RollupPublicKeys, RpcConfig, RunnerConfig}; use citrea_primitives::types::SoftConfirmationHash; use jsonrpsee::core::client::Error as JsonrpseeError; use jsonrpsee::server::{BatchRequestConfig, ServerBuilder}; @@ -26,15 +27,14 @@ use sov_rollup_interface::services::da::DaService; use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::stf::StateTransitionFunction; use sov_rollup_interface::zk::ZkvmHost; -use sov_stf_runner::{ - InitVariant, ProverConfig, ProverService, RollupPublicKeys, RpcConfig, RunnerConfig, -}; +use sov_stf_runner::{InitVariant, ProverService}; use tokio::sync::{broadcast, mpsc, oneshot, Mutex}; use tokio::time::sleep; use tokio::{select, signal}; use tracing::{debug, error, info, instrument}; use crate::da_block_handler::L1BlockHandler; +use crate::rpc::{create_rpc_module, 
RpcContext}; type StateRoot = >::StateRoot; @@ -63,7 +63,7 @@ where sequencer_pub_key: Vec, sequencer_da_pub_key: Vec, phantom: std::marker::PhantomData, - prover_config: Option, + prover_config: ProverConfig, code_commitments_by_spec: HashMap, l1_block_cache: Arc>>, sync_blocks_count: u64, @@ -107,7 +107,7 @@ where mut storage_manager: Sm, init_variant: InitVariant, prover_service: Arc, - prover_config: Option, + prover_config: ProverConfig, code_commitments_by_spec: HashMap, fork_manager: ForkManager, soft_confirmation_tx: broadcast::Sender, @@ -163,20 +163,44 @@ where }) } + /// Creates a shared RpcContext with all required data. + fn create_rpc_context(&self) -> RpcContext { + RpcContext { + ledger: self.ledger_db.clone(), + da_service: self.da_service.clone(), + sequencer_da_pub_key: self.sequencer_da_pub_key.clone(), + sequencer_pub_key: self.sequencer_pub_key.clone(), + l1_block_cache: self.l1_block_cache.clone(), + phantom: std::marker::PhantomData, + } + } + + /// Updates the given RpcModule with Prover methods. + pub fn register_rpc_methods( + &self, + mut rpc_methods: jsonrpsee::RpcModule<()>, + ) -> Result, jsonrpsee::core::RegisterMethodError> { + let rpc_context = self.create_rpc_context(); + let rpc = create_rpc_module(rpc_context); + rpc_methods.merge(rpc)?; + Ok(rpc_methods) + } + /// Starts a RPC server with provided rpc methods. pub async fn start_rpc_server( &mut self, methods: RpcModule<()>, channel: Option>, - ) { - let bind_host = match self.rpc_config.bind_host.parse() { - Ok(bind_host) => bind_host, - Err(e) => { - error!("Failed to parse bind host: {}", e); - return; - } - }; - let listen_address = SocketAddr::new(bind_host, self.rpc_config.bind_port); + ) -> anyhow::Result<()> { + let methods = self.register_rpc_methods(methods)?; + + let listen_address = SocketAddr::new( + self.rpc_config + .bind_host + .parse() + .map_err(|e| anyhow!("Failed to parse bind host: {}", e))?, + self.rpc_config.bind_port, + ); let max_connections = self.rpc_config.max_connections; let max_subscriptions_per_connection = self.rpc_config.max_subscriptions_per_connection; @@ -223,6 +247,7 @@ where } } }); + Ok(()) } /// Runs the rollup. @@ -244,7 +269,7 @@ where }; let ledger_db = self.ledger_db.clone(); - let prover_config = self.prover_config.clone().unwrap(); + let prover_config = self.prover_config.clone(); let prover_service = self.prover_service.clone(); let da_service = self.da_service.clone(); let sequencer_pub_key = self.sequencer_pub_key.clone(); diff --git a/crates/sequencer/src/config.rs b/crates/sequencer/src/config.rs deleted file mode 100644 index 48fdd2d481..0000000000 --- a/crates/sequencer/src/config.rs +++ /dev/null @@ -1,129 +0,0 @@ -use serde::{Deserialize, Serialize}; - -/// Rollup Configuration -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct SequencerConfig { - /// Private key of the sequencer - pub private_key: String, - /// Min. 
soft confirmaitons for sequencer to commit - pub min_soft_confirmations_per_commitment: u64, - /// Whether or not the sequencer is running in test mode - pub test_mode: bool, - /// Limit for the number of deposit transactions to be included in the block - pub deposit_mempool_fetch_limit: usize, - /// Sequencer specific mempool config - pub mempool_conf: SequencerMempoolConfig, - /// DA layer update loop interval in ms - pub da_update_interval_ms: u64, - /// Block production interval in ms - pub block_production_interval_ms: u64, -} - -impl Default for SequencerConfig { - fn default() -> Self { - SequencerConfig { - private_key: "1212121212121212121212121212121212121212121212121212121212121212" - .to_string(), - min_soft_confirmations_per_commitment: 4, - test_mode: true, - deposit_mempool_fetch_limit: 10, - block_production_interval_ms: 100, - da_update_interval_ms: 100, - mempool_conf: Default::default(), - } - } -} - -/// Mempool Config for the sequencer -/// Read: https://github.com/ledgerwatch/erigon/wiki/Transaction-Pool-Design -#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] -pub struct SequencerMempoolConfig { - /// Max number of transactions in the pending sub-pool - pub pending_tx_limit: u64, - /// Max megabytes of transactions in the pending sub-pool - pub pending_tx_size: u64, - /// Max number of transactions in the queued sub-pool - pub queue_tx_limit: u64, - /// Max megabytes of transactions in the queued sub-pool - pub queue_tx_size: u64, - /// Max number of transactions in the base-fee sub-pool - pub base_fee_tx_limit: u64, - /// Max megabytes of transactions in the base-fee sub-pool - pub base_fee_tx_size: u64, - /// Max number of executable transaction slots guaranteed per account - pub max_account_slots: u64, -} - -impl Default for SequencerMempoolConfig { - fn default() -> Self { - Self { - pending_tx_limit: 100000, - pending_tx_size: 200, - queue_tx_limit: 100000, - queue_tx_size: 200, - base_fee_tx_limit: 100000, - base_fee_tx_size: 200, - max_account_slots: 16, - } - } -} - -#[cfg(test)] -mod tests { - use std::io::Write; - - use sov_stf_runner::from_toml_path; - use tempfile::NamedTempFile; - - use super::*; - - fn create_config_from(content: &str) -> NamedTempFile { - let mut config_file = NamedTempFile::new().unwrap(); - config_file.write_all(content.as_bytes()).unwrap(); - config_file - } - - #[test] - fn test_correct_config_sequencer() { - let config = r#" - private_key = "1212121212121212121212121212121212121212121212121212121212121212" - min_soft_confirmations_per_commitment = 123 - test_mode = false - deposit_mempool_fetch_limit = 10 - da_update_interval_ms = 1000 - block_production_interval_ms = 1000 - [mempool_conf] - pending_tx_limit = 100000 - pending_tx_size = 200 - queue_tx_limit = 100000 - queue_tx_size = 200 - base_fee_tx_limit = 100000 - base_fee_tx_size = 200 - max_account_slots = 16 - "#; - - let config_file = create_config_from(config); - - let config: SequencerConfig = from_toml_path(config_file.path()).unwrap(); - - let expected = SequencerConfig { - private_key: "1212121212121212121212121212121212121212121212121212121212121212" - .to_string(), - min_soft_confirmations_per_commitment: 123, - test_mode: false, - deposit_mempool_fetch_limit: 10, - mempool_conf: SequencerMempoolConfig { - pending_tx_limit: 100000, - pending_tx_size: 200, - queue_tx_limit: 100000, - queue_tx_size: 200, - base_fee_tx_limit: 100000, - base_fee_tx_size: 200, - max_account_slots: 16, - }, - da_update_interval_ms: 1000, - block_production_interval_ms: 1000, - 
}; - assert_eq!(config, expected); - } -} diff --git a/crates/sequencer/src/lib.rs b/crates/sequencer/src/lib.rs index 8c8940595a..a590df559c 100644 --- a/crates/sequencer/src/lib.rs +++ b/crates/sequencer/src/lib.rs @@ -1,5 +1,4 @@ mod commitment_controller; -mod config; mod db_provider; mod deposit_data_mempool; mod mempool; @@ -9,7 +8,8 @@ mod utils; use std::net::SocketAddr; -pub use config::{SequencerConfig, SequencerMempoolConfig}; +pub use citrea_common::{SequencerConfig, SequencerMempoolConfig}; +pub use rpc::SequencerRpcClient; pub use sequencer::CitreaSequencer; use sov_db::ledger_db::LedgerDB; use sov_modules_rollup_blueprint::RollupBlueprint; diff --git a/crates/sequencer/src/mempool.rs b/crates/sequencer/src/mempool.rs index 3a784909be..e477fdd2be 100644 --- a/crates/sequencer/src/mempool.rs +++ b/crates/sequencer/src/mempool.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use anyhow::{anyhow, bail}; +use citrea_common::SequencerMempoolConfig; use citrea_evm::SYSTEM_SIGNER; use reth_chainspec::{Chain, ChainSpecBuilder}; use reth_primitives::{Genesis, TxHash}; @@ -13,7 +14,6 @@ use reth_transaction_pool::{ TransactionPool, TransactionPoolExt, TransactionValidationTaskExecutor, ValidPoolTransaction, }; -use crate::config::SequencerMempoolConfig; pub use crate::db_provider::DbProvider; type CitreaMempoolImpl = Pool< diff --git a/crates/sequencer/src/sequencer.rs b/crates/sequencer/src/sequencer.rs index 2d2c1d0e15..99f35992d6 100644 --- a/crates/sequencer/src/sequencer.rs +++ b/crates/sequencer/src/sequencer.rs @@ -10,6 +10,7 @@ use backoff::ExponentialBackoffBuilder; use borsh::BorshDeserialize; use citrea_common::tasks::manager::TaskManager; use citrea_common::utils::merge_state_diffs; +use citrea_common::{RollupPublicKeys, RpcConfig, SequencerConfig}; use citrea_evm::{CallMessage, Evm, RlpEvmTransaction, MIN_TRANSACTION_GAS}; use citrea_primitives::basefee::calculate_next_block_base_fee; use citrea_primitives::types::SoftConfirmationHash; @@ -46,7 +47,7 @@ use sov_rollup_interface::services::da::{DaService, SenderWithNotifier}; use sov_rollup_interface::stf::StateTransitionFunction; use sov_rollup_interface::storage::HierarchicalStorageManager; use sov_rollup_interface::zk::ZkvmHost; -use sov_stf_runner::{InitVariant, RollupPublicKeys, RpcConfig}; +use sov_stf_runner::InitVariant; use tokio::signal; use tokio::sync::oneshot::channel as oneshot_channel; use tokio::sync::{broadcast, mpsc}; @@ -55,7 +56,6 @@ use tokio_util::sync::CancellationToken; use tracing::{debug, error, info, instrument, trace, warn}; use crate::commitment_controller; -use crate::config::SequencerConfig; use crate::db_provider::DbProvider; use crate::deposit_data_mempool::DepositDataMempool; use crate::mempool::CitreaMempool; diff --git a/crates/sovereign-sdk/examples/demo-stf/Cargo.toml b/crates/sovereign-sdk/examples/demo-stf/Cargo.toml index 1dd210e633..1c88b6fa02 100644 --- a/crates/sovereign-sdk/examples/demo-stf/Cargo.toml +++ b/crates/sovereign-sdk/examples/demo-stf/Cargo.toml @@ -16,7 +16,6 @@ borsh = { workspace = true } serde = { workspace = true } serde_json = { workspace = true, optional = true } clap = { workspace = true, optional = true } -toml = { workspace = true, optional = true } jsonrpsee = { workspace = true, features = [ "http-client", "server", @@ -67,7 +66,6 @@ native = [ "soft-confirmation-rule-enforcer/native", "jsonrpsee", "tokio", - "toml", ] serde = [ "sov-bank/serde", diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/Cargo.toml 
b/crates/sovereign-sdk/full-node/sov-stf-runner/Cargo.toml index 3d4d84efa4..76058e7b03 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/Cargo.toml +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/Cargo.toml @@ -24,7 +24,6 @@ serde = { workspace = true } serde_json = { workspace = true } thiserror = { workspace = true, optional = true } tokio = { workspace = true, optional = true } -toml = { workspace = true, optional = true } tower = { workspace = true, optional = true } tracing = { workspace = true, optional = true } @@ -33,14 +32,10 @@ sov-db = { path = "../db/sov-db", optional = true } sov-modules-api = { path = "../../module-system/sov-modules-api", default-features = false } sov-rollup-interface = { path = "../../rollup-interface" } -# Citrea -citrea-pruning = { path = "../../../pruning", optional = true } - [dev-dependencies] sha2 = { workspace = true } tempfile = { workspace = true } -sov-mock-da = { path = "../../adapters/mock-da", features = ["native"] } sov-modules-api = { path = "../../module-system/sov-modules-api", features = ["native"] } sov-prover-storage-manager = { path = "../sov-prover-storage-manager", features = ["test-utils"] } sov-state = { path = "../../module-system/sov-state", features = ["native"] } @@ -52,7 +47,6 @@ native = [ "sov-db", "sov-modules-api/native", "jsonrpsee", - "toml", "tokio", "tracing", "futures", @@ -61,5 +55,4 @@ native = [ "rand", "tower", "hyper", - "dep:citrea-pruning", ] diff --git a/crates/sovereign-sdk/full-node/sov-stf-runner/src/lib.rs b/crates/sovereign-sdk/full-node/sov-stf-runner/src/lib.rs index 700f8fab4e..2eff97fbc6 100644 --- a/crates/sovereign-sdk/full-node/sov-stf-runner/src/lib.rs +++ b/crates/sovereign-sdk/full-node/sov-stf-runner/src/lib.rs @@ -1,9 +1,6 @@ #![deny(missing_docs)] #![doc = include_str!("../README.md")] -#[cfg(feature = "native")] -/// Config -pub mod config; /// Testing utilities. #[cfg(feature = "mock")] pub mod mock; @@ -16,8 +13,6 @@ use std::path::Path; #[cfg(feature = "native")] use anyhow::Context; #[cfg(feature = "native")] -pub use config::*; -#[cfg(feature = "native")] pub use prover_service::*; #[cfg(feature = "native")] use sov_modules_api::{DaSpec, Zkvm}; diff --git a/crates/sovereign-sdk/module-system/sov-modules-core/src/common/witness.rs b/crates/sovereign-sdk/module-system/sov-modules-core/src/common/witness.rs index 71693fcaef..db4212fbd1 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-core/src/common/witness.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-core/src/common/witness.rs @@ -14,7 +14,9 @@ use serde::Serialize; /// they were added via [`Witness::add_hint`]. // TODO: Refactor witness trait so it only require Serialize / Deserialize // https://github.com/Sovereign-Labs/sovereign-sdk/issues/263 -pub trait Witness: Default + BorshDeserialize + Serialize + DeserializeOwned { +pub trait Witness: + Default + BorshSerialize + BorshDeserialize + Serialize + DeserializeOwned +{ /// Adds a serializable "hint" to the witness value, which can be later /// read by the zkVM circuit. 
/// diff --git a/crates/sovereign-sdk/module-system/sov-modules-macros/Cargo.toml b/crates/sovereign-sdk/module-system/sov-modules-macros/Cargo.toml index 96edb26efe..888062cd6f 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-macros/Cargo.toml +++ b/crates/sovereign-sdk/module-system/sov-modules-macros/Cargo.toml @@ -37,7 +37,7 @@ jsonrpsee = { workspace = true, features = ["http-client", "server"], optional = proc-macro2 = "1.0" quote = "1.0" serde_json = { workspace = true } -syn = { version = "1.0", features = ["full"] } +syn = { version = "1.0", features = ["full", "extra-traits"] } [features] default = ["native"] diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml index 706a2f3091..8bee6e5de0 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/Cargo.toml @@ -12,6 +12,7 @@ resolver = "2" description = "This crate contains abstractions needed to create a new rollup" [dependencies] +citrea-common = { path = "../../../common" } sov-cli = { path = "../../module-system/sov-cli" } sov-modules-api = { path = "../../module-system/sov-modules-api", features = ["native"] } sov-rollup-interface = { path = "../../rollup-interface", features = ["native"] } diff --git a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs index 74ed15aecc..386cc20c1b 100644 --- a/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs +++ b/crates/sovereign-sdk/module-system/sov-modules-rollup-blueprint/src/lib.rs @@ -8,6 +8,7 @@ use std::collections::HashMap; use std::sync::Arc; use async_trait::async_trait; +use citrea_common::{FullNodeConfig, ProverConfig}; pub use runtime_rpc::*; use sov_db::ledger_db::LedgerDB; use sov_db::rocks_db_config::RocksdbConfig; @@ -18,7 +19,7 @@ use sov_rollup_interface::spec::SpecId; use sov_rollup_interface::storage::HierarchicalStorageManager; use sov_rollup_interface::zk::{Zkvm, ZkvmHost}; use sov_state::Storage; -use sov_stf_runner::{FullNodeConfig, ProverConfig, ProverService}; +use sov_stf_runner::ProverService; use tokio::sync::broadcast; pub use wallet::*; diff --git a/crates/sovereign-sdk/rollup-interface/src/state_machine/stf.rs b/crates/sovereign-sdk/rollup-interface/src/state_machine/stf.rs index e1e7e8a57a..1e7b000f43 100644 --- a/crates/sovereign-sdk/rollup-interface/src/state_machine/stf.rs +++ b/crates/sovereign-sdk/rollup-interface/src/state_machine/stf.rs @@ -193,7 +193,13 @@ pub trait StateTransitionFunction { /// Witness is a data that is produced during actual batch execution /// or validated together with proof during verification - type Witness: Default + BorshDeserialize + Serialize + DeserializeOwned + Send + Sync; + type Witness: Default + + BorshSerialize + + BorshDeserialize + + Serialize + + DeserializeOwned + + Send + + Sync; /// The validity condition that must be verified outside of the Vm type Condition: ValidityCondition; diff --git a/resources/scripts/cycle-diff.sh b/resources/scripts/cycle-diff.sh index cc509229f2..3ea5034176 100755 --- a/resources/scripts/cycle-diff.sh +++ b/resources/scripts/cycle-diff.sh @@ -16,7 +16,7 @@ # # Environment Variables: # BASE_BRANCH: The branch to compare against (default: nightly) -# TEST_NAME: The test file to run (default: 
bitcoin_e2e::tests::prover_test::basic_prover_test) +# TEST_NAME: The test file to run (default: bitcoin_e2e::prover_test::basic_prover_test) # TARGET_PCT: The threshold percentage for regression detection (default: 3) # NUM_RUNS: Number of times to run the test for averaging (default: 1) # @@ -38,7 +38,7 @@ set -euo pipefail BASE_BRANCH=${BASE_BRANCH:-"nightly"} -TEST_NAME=${TEST_NAME:-"bitcoin_e2e::tests::prover_test::basic_prover_test"} +TEST_NAME=${TEST_NAME:-"bitcoin_e2e::prover_test::basic_prover_test"} TARGET_PCT=${TARGET_PCT:-3} COMPARISON_FILE=${COMPARISON_FILE:-"comparison_results.log"} NUM_RUNS=${NUM_RUNS:-1}
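Two short sketches, for illustration only, of logic introduced earlier in this diff.

First, the unit handling behind get_fee_rate_as_sat_vb in crates/bitcoin-da/src/service.rs: mempool.space's fastestFee is quoted in sat/vB and is multiplied by 1000 so it lines up with the sat/kvB amount returned by estimatesmartfee, and the combined result is divided back down to sat/vB with a 1 sat/vB floor. A minimal standalone analogue, with hypothetical names, assuming both inputs have already been reduced to plain integers:

    // Hypothetical helper mirroring the fallback-and-convert logic; not part of the diff.
    fn fee_rate_sat_per_vb(mempool_space_sat_vb: Option<u64>, core_sat_kvb: Option<u64>) -> u64 {
        let sat_per_kvb = mempool_space_sat_vb
            .map(|sat_vb| sat_vb * 1000) // mempool.space answers in sat/vB, scale to sat/kvB
            .or(core_sat_kvb)            // otherwise use Bitcoin Core's estimate (already sat/kvB)
            .unwrap_or(1000);            // default to 1000 sat/kvB, i.e. 1 sat/vB
        sat_per_kvb / 1000
    }

Second, the grouping rule in break_sequencer_commitments_into_groups (crates/prover/src/da_block_handler.rs): the current range keeps growing until the cumulative compressed state diff would no longer fit in a DA transaction body, at which point the range is closed and a new range starts at the commitment that overflowed. A simplified analogue that substitutes plain byte counts for the real merged-and-compressed state diffs (names and sizes are illustrative):

    // Illustrative only: the real function merges per-commitment state diffs,
    // borsh-serializes and compresses the cumulative diff, and compares its
    // length against MAX_TXBODY_SIZE. Plain byte counts stand in for that here.
    fn group_by_size(sizes: &[usize], limit: usize) -> Vec<std::ops::RangeInclusive<usize>> {
        let mut result_range = vec![];
        let mut range = 0usize..=0usize;
        let mut cumulative = 0usize;
        for (index, size) in sizes.iter().enumerate() {
            cumulative += size;
            if cumulative > limit {
                // The limit is exceeded with the current commitment,
                // so close the group at the previous one ...
                result_range.push(range);
                // ... and restart the running total from the current commitment.
                cumulative = *size;
                range = index..=index;
            } else {
                range = *range.start()..=index;
            }
        }
        // The last group never hit the threshold, so add it as well.
        result_range.push(range);
        result_range
    }

    // group_by_size(&[10, 20, 50, 40], 60) == vec![0..=1, 2..=2, 3..=3]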
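Finally, a hedged sketch of how an external tool might exercise the new prover_generateInput endpoint from crates/prover/src/rpc.rs using jsonrpsee's generic HTTP client. The bind address and L1 height are placeholders, and ProverInputResponse is re-declared locally purely for deserialization. Passing Some(true) as the second parameter asks the prover to break the commitments into groups by state diff size; omitting it (or passing false) yields a single group covering all commitments.

    // Sketch of a client-side call; assumes the prover RPC listens on the
    // address below and that tokio, anyhow, serde and jsonrpsee (with the
    // "http-client" feature) are available as dependencies.
    use jsonrpsee::core::client::ClientT;
    use jsonrpsee::http_client::HttpClientBuilder;
    use jsonrpsee::rpc_params;
    use serde::Deserialize;

    // Local mirror of the response type defined in crates/prover/src/rpc.rs.
    #[derive(Debug, Deserialize)]
    struct ProverInputResponse {
        commitment_range: (u32, u32),
        l1_block_height: u64,
        encoded_serialized_state_transition_data: String,
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let client = HttpClientBuilder::default().build("http://127.0.0.1:12346")?;

        // Hypothetical L1 height; Some(true) requests one input per commitment group.
        let inputs: Vec<ProverInputResponse> = client
            .request("prover_generateInput", rpc_params![1000u64, Some(true)])
            .await?;

        for input in inputs {
            println!(
                "commitments {:?} at L1 height {}: {} hex chars of serialized input",
                input.commitment_range,
                input.l1_block_height,
                input.encoded_serialized_state_transition_data.len()
            );
        }
        Ok(())
    }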