From 40501fba64d5eb7c574c4205d88182640935019e Mon Sep 17 00:00:00 2001 From: jfldde <168934971+jfldde@users.noreply.github.com> Date: Mon, 14 Oct 2024 21:35:17 +0100 Subject: [PATCH] Citrea docker support --- .github/workflows/code_checks.yml | 8 + src/bitcoin.rs | 55 +++--- src/citrea_config/rollup.rs | 71 ++++++++ src/config/bitcoin.rs | 2 + src/config/docker.rs | 79 +++++--- src/config/mod.rs | 110 ++++++++++- src/config/rollup.rs | 71 +------- src/config/test_case.rs | 35 +++- src/config/utils.rs | 6 +- src/docker.rs | 166 +++++++++++------ src/framework.rs | 293 ++++++++++++++++++++++++++++-- src/node.rs | 108 ++++++----- src/test_case.rs | 244 +------------------------ src/traits.rs | 12 +- src/utils.rs | 22 ++- tests/mod.rs | 72 ++++++++ 16 files changed, 841 insertions(+), 513 deletions(-) create mode 100644 tests/mod.rs diff --git a/.github/workflows/code_checks.yml b/.github/workflows/code_checks.yml index 9d92293..8c4eec7 100644 --- a/.github/workflows/code_checks.yml +++ b/.github/workflows/code_checks.yml @@ -46,3 +46,11 @@ jobs: with: version: "latest" args: "--workspace --all-features --all-targets" + + test: + name: Tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Run Cargo test + run: cargo test diff --git a/src/bitcoin.rs b/src/bitcoin.rs index f7e9b4a..f41953c 100644 --- a/src/bitcoin.rs +++ b/src/bitcoin.rs @@ -35,7 +35,7 @@ pub struct BitcoinNode { impl BitcoinNode { pub async fn new(config: &BitcoinConfig, docker: Arc>) -> Result { - let spawn_output = Self::spawn(config, &docker).await?; + let spawn_output = ::spawn(config, &docker).await?; let rpc_url = format!( "http://127.0.0.1:{}/wallet/{}", @@ -136,12 +136,26 @@ impl BitcoinNode { .await; } - // Switch this over to Node signature once we add support for docker to citrea nodes - async fn spawn(config: &BitcoinConfig, docker: &Arc>) -> Result { - match docker.as_ref() { - Some(docker) => docker.spawn(config.into()).await, - None => ::spawn(config), - } + fn spawn(config: &BitcoinConfig) -> Result { + let args = config.args(); + debug!("Running bitcoind with args : {args:?}"); + + info!( + "Bitcoin debug.log available at : {}", + config.log_path().display() + ); + + let stderr_path = config.stderr_path(); + let stderr_file = File::create(stderr_path).context("Failed to create stderr file")?; + + Command::new("bitcoind") + .args(&args) + .kill_on_drop(true) + .envs(config.env.clone()) + .stderr(Stdio::from(stderr_file)) + .spawn() + .context("Failed to spawn bitcoind process") + .map(SpawnOutput::Child) } } @@ -182,26 +196,11 @@ impl NodeT for BitcoinNode { type Config = BitcoinConfig; type Client = Client; - fn spawn(config: &Self::Config) -> Result { - let args = config.args(); - debug!("Running bitcoind with args : {args:?}"); - - info!( - "Bitcoin debug.log available at : {}", - config.log_path().display() - ); - - let stderr_path = config.stderr_path(); - let stderr_file = File::create(stderr_path).context("Failed to create stderr file")?; - - Command::new("bitcoind") - .args(&args) - .kill_on_drop(true) - .envs(config.env.clone()) - .stderr(Stdio::from(stderr_file)) - .spawn() - .context("Failed to spawn bitcoind process") - .map(SpawnOutput::Child) + async fn spawn(config: &Self::Config, docker: &Arc>) -> Result { + match docker.as_ref() { + Some(docker) => docker.spawn(config.into()).await, + None => Self::spawn(config), + } } fn spawn_output(&mut self) -> &mut SpawnOutput { @@ -270,7 +269,7 @@ impl Restart for BitcoinNode { if let Some(config) = config { self.config = config; 
} - self.spawn_output = Self::spawn(&self.config, &self.docker_env).await?; + self.spawn_output = ::spawn(&self.config, &self.docker_env).await?; self.wait_for_ready(None).await?; diff --git a/src/citrea_config/rollup.rs b/src/citrea_config/rollup.rs index 23bad87..63cb626 100644 --- a/src/citrea_config/rollup.rs +++ b/src/citrea_config/rollup.rs @@ -1,6 +1,9 @@ use std::path::PathBuf; use serde::{Deserialize, Serialize}; +use tempfile::TempDir; + +use crate::config::{BitcoinConfig, BitcoinServiceConfig}; /// Runner configuration. #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] @@ -121,6 +124,74 @@ pub struct FullNodeConfig { pub public_keys: RollupPublicKeys, } +impl Default for FullNodeConfig { + fn default() -> Self { + Self { + rpc: RpcConfig { + bind_host: "127.0.0.1".into(), + bind_port: 0, + max_connections: 100, + max_request_body_size: 10 * 1024 * 1024, + max_response_body_size: 10 * 1024 * 1024, + batch_requests_limit: 50, + enable_subscriptions: true, + max_subscriptions_per_connection: 100, + }, + storage: StorageConfig { + path: TempDir::new() + .expect("Failed to create temporary directory") + .into_path(), + db_max_open_files: None, + }, + runner: None, + da: BitcoinServiceConfig { + node_url: String::new(), + node_username: String::from("user"), + node_password: String::from("password"), + network: bitcoin::Network::Regtest, + da_private_key: None, + tx_backup_dir: TempDir::new() + .expect("Failed to create temporary directory") + .into_path() + .display() + .to_string(), + }, + public_keys: RollupPublicKeys { + sequencer_public_key: vec![ + 32, 64, 64, 227, 100, 193, 15, 43, 236, 156, 31, 229, 0, 161, 205, 76, 36, 124, + 137, 214, 80, 160, 30, 215, 232, 44, 171, 168, 103, 135, 124, 33, + ], + // private key [4, 95, 252, 129, 163, 193, 253, 179, 175, 19, 89, 219, 242, 209, 20, 176, 179, 239, 191, 127, 41, 204, 156, 93, 160, 18, 103, 170, 57, 210, 199, 141] + // Private Key (WIF): KwNDSCvKqZqFWLWN1cUzvMiJQ7ck6ZKqR6XBqVKyftPZtvmbE6YD + sequencer_da_pub_key: vec![ + 3, 136, 195, 18, 11, 187, 25, 37, 38, 109, 184, 237, 247, 208, 131, 219, 162, + 70, 35, 174, 234, 47, 239, 247, 60, 51, 174, 242, 247, 112, 186, 222, 30, + ], + // private key [117, 186, 249, 100, 208, 116, 89, 70, 0, 54, 110, 91, 17, 26, 29, 168, 248, 107, 46, 254, 45, 34, 218, 81, 200, 216, 33, 38, 160, 252, 172, 114] + // Private Key (WIF): L1AZdJXzDGGENBBPZGSL7dKJnwn5xSKqzszgK6CDwiBGThYQEVTo + prover_da_pub_key: vec![ + 2, 138, 232, 157, 214, 46, 7, 210, 235, 33, 105, 239, 71, 169, 105, 233, 239, + 84, 172, 112, 13, 54, 9, 206, 106, 138, 251, 218, 15, 28, 137, 112, 127, + ], + }, + } + } +} + +impl From for BitcoinServiceConfig { + fn from(v: BitcoinConfig) -> Self { + let ip = v.docker_ip.unwrap_or(String::from("127.0.0.1")); + Self { + node_url: format!("{}:{}", ip, v.rpc_port), + node_username: v.rpc_user, + node_password: v.rpc_password, + network: v.network, + da_private_key: None, + tx_backup_dir: String::new(), + } + } +} + /// A configuration type to define the behaviour of the pruner. 
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct PruningConfig { diff --git a/src/config/bitcoin.rs b/src/config/bitcoin.rs index 47c2ede..50882ea 100644 --- a/src/config/bitcoin.rs +++ b/src/config/bitcoin.rs @@ -17,6 +17,7 @@ pub struct BitcoinConfig { pub docker_image: Option, pub env: Vec<(&'static str, &'static str)>, pub idx: usize, + pub docker_ip: Option, } impl Default for BitcoinConfig { @@ -34,6 +35,7 @@ impl Default for BitcoinConfig { docker_image: Some("bitcoin/bitcoin:28.0".to_string()), env: Vec::new(), idx: 0, + docker_ip: None, } } } diff --git a/src/config/docker.rs b/src/config/docker.rs index d8b2e7d..a2e9ebd 100644 --- a/src/config/docker.rs +++ b/src/config/docker.rs @@ -1,8 +1,18 @@ +use std::fmt::Debug; use std::path::PathBuf; -use super::{BitcoinConfig, FullSequencerConfig}; +use serde::Serialize; +use tracing::debug; + +use super::{BitcoinConfig, FullL2NodeConfig, NodeKindMarker}; +use crate::log_provider::LogPathProvider; +use crate::node::{get_citrea_args, Config, NodeKind}; use crate::utils::get_genesis_path; +const DEFAULT_BITCOIN_DOCKER_IMAGE: &str = "bitcoin/bitcoin:28.0"; +const DEFAULT_CITREA_DOCKER_IMAGE: &str = + "chainwayxyz/citrea:e7db3c1c1787014ec4f7eb365bd8657d8f0917a0"; + #[derive(Debug)] pub struct VolumeConfig { pub name: String, @@ -16,11 +26,13 @@ pub struct DockerConfig { pub cmd: Vec, pub log_path: PathBuf, pub volume: VolumeConfig, + pub host_dir: Option>, + pub kind: NodeKind, } impl From<&BitcoinConfig> for DockerConfig { - fn from(v: &BitcoinConfig) -> Self { - let mut args = v.args(); + fn from(config: &BitcoinConfig) -> Self { + let mut args = config.args(); // Docker specific args args.extend([ @@ -30,48 +42,55 @@ impl From<&BitcoinConfig> for DockerConfig { ]); Self { - ports: vec![v.rpc_port, v.p2p_port], - image: v + ports: vec![config.rpc_port, config.p2p_port], + image: config .docker_image .clone() - .unwrap_or_else(|| "bitcoin/bitcoin:28.0".to_string()), + .unwrap_or_else(|| DEFAULT_BITCOIN_DOCKER_IMAGE.to_string()), cmd: args, - log_path: v.data_dir.join("regtest").join("debug.log"), + log_path: config.data_dir.join("regtest").join("debug.log"), volume: VolumeConfig { - name: format!("bitcoin-{}", v.idx), + name: format!("bitcoin-{}", config.idx), target: "/home/bitcoin/.bitcoin".to_string(), }, + host_dir: None, + kind: NodeKind::Bitcoin, } } } -impl From<&FullSequencerConfig> for DockerConfig { - fn from(v: &FullSequencerConfig) -> Self { - let args = vec![ - "--da-layer".to_string(), - "bitcoin".to_string(), - "--rollup-config-path".to_string(), - "sequencer_rollup_config.toml".to_string(), - "--sequencer-config-path".to_string(), - "sequencer_config.toml".to_string(), - "--genesis-paths".to_string(), - get_genesis_path(v.dir.parent().expect("Couldn't get parent dir")) - .display() - .to_string(), - ]; +impl From> for DockerConfig +where + T: Clone + Serialize + Debug, + FullL2NodeConfig: NodeKindMarker, +{ + fn from(config: FullL2NodeConfig) -> Self { + let kind = FullL2NodeConfig::::kind(); + + debug!("Converting config {config:?} for {kind} to docker config"); + + let args = get_citrea_args(&config); Self { - ports: vec![v.rollup.rpc.bind_port], - image: v - .docker_image - .clone() - .unwrap_or_else(|| "citrea:latest".to_string()), // Default to local image + ports: vec![config.rollup.rpc.bind_port], + image: config.docker_image.clone().unwrap_or_else(|| { + let base_img = DEFAULT_CITREA_DOCKER_IMAGE; + match std::env::var("SHORT_PREFIX") { + Ok(_) => format!("{base_img}-short-prefix"), + _ => 
base_img.to_string(), + } + }), cmd: args, - log_path: v.dir.join("stdout"), + log_path: config.dir.join("stdout.log"), volume: VolumeConfig { - name: "sequencer".to_string(), - target: "/sequencer/data".to_string(), + name: format!("{kind}"), + target: format!("/{kind}/data"), }, + host_dir: Some(vec![ + config.dir().to_owned().display().to_string(), + get_genesis_path(&config), + ]), + kind, } } } diff --git a/src/config/mod.rs b/src/config/mod.rs index b069209..cc09687 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -5,14 +5,18 @@ mod test; mod test_case; mod utils; -use std::path::PathBuf; +use std::{ + fmt::{self, Debug}, + path::PathBuf, +}; +use anyhow::Context; pub use bitcoin::BitcoinConfig; pub use docker::DockerConfig; -pub use rollup::{default_rollup_config, RollupConfig}; +pub use rollup::RollupConfig; use serde::Serialize; pub use test::TestConfig; -pub use test_case::{TestCaseConfig, TestCaseEnv}; +pub use test_case::{TestCaseConfig, TestCaseDockerConfig, TestCaseEnv}; pub use utils::config_to_file; pub use crate::citrea_config::{ @@ -25,15 +29,69 @@ pub use crate::citrea_config::{ use crate::{ log_provider::LogPathProvider, node::{Config, NodeKind}, + Result, }; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] +pub enum DaLayer { + #[default] + Bitcoin, + MockDa, +} + +impl fmt::Display for DaLayer { + // This trait requires `fmt` with this exact signature. + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + DaLayer::Bitcoin => write!(f, "bitcoin"), + DaLayer::MockDa => write!(f, "mock"), + } + } +} + +#[derive(Clone, Debug, Default)] pub struct FullL2NodeConfig { pub node: T, pub rollup: RollupConfig, pub docker_image: Option, pub dir: PathBuf, pub env: Vec<(&'static str, &'static str)>, + pub da_layer: Option, +} + +impl FullL2NodeConfig +where + T: Clone + Serialize + Debug, + FullL2NodeConfig: NodeKindMarker, +{ + pub fn new( + node: T, + rollup: RollupConfig, + docker_image: Option, + dir: PathBuf, + env: Vec<(&'static str, &'static str)>, + ) -> Result { + let conf = Self { + node, + rollup, + docker_image, + dir, + env, + da_layer: None, + }; + + let kind = FullL2NodeConfig::::kind(); + let node_config_args = conf.get_node_config_args().unwrap_or_default(); + if let (Some(config), Some(config_path)) = (conf.node_config(), node_config_args.get(1)) { + config_to_file(config, &config_path) + .with_context(|| format!("Error writing {kind} config to file"))?; + } + + let rollup_config_args = conf.get_rollup_config_args(); + config_to_file(&conf.rollup_config(), &rollup_config_args[1])?; + + Ok(conf) + } } pub type FullSequencerConfig = FullL2NodeConfig; @@ -83,6 +141,10 @@ where self.env.clone() } + fn node_kind() -> NodeKind { + ::KIND + } + fn node_config(&self) -> Option<&Self::NodeConfig> { if std::mem::size_of::() == 0 { None @@ -90,18 +152,48 @@ where Some(&self.node) } } + fn rollup_config(&self) -> &RollupConfig { + &self.rollup + } - fn node_kind() -> NodeKind { - ::KIND + // Get node config path argument and path. 
+ // Not required for `full-node` + fn get_node_config_args(&self) -> Option> { + let dir = self.dir(); + let kind = Self::node_kind(); + self.node_config().map_or(None, |_| { + let config_path = dir.join(format!("{kind}_config.toml")); + let node_kind_str = match &kind { + NodeKind::BatchProver | NodeKind::LightClientProver => "prover".to_string(), + kind => kind.to_string(), + }; + Some(vec![ + format!("--{node_kind_str}-config-path"), + config_path.display().to_string(), + ]) + }) } - fn rollup_config(&self) -> &RollupConfig { - &self.rollup + // Get rollup config path argument and path. + fn get_rollup_config_args(&self) -> Vec { + let kind = Self::node_kind(); + vec![ + format!("--rollup-config-path"), + self.dir() + .join(format!("{kind}_rollup_config.toml")) + .display() + .to_string(), + ] + } + + fn da_layer(&self) -> DaLayer { + self.da_layer.clone().unwrap_or_default() } } -impl LogPathProvider for FullL2NodeConfig +impl LogPathProvider for FullL2NodeConfig where + T: Clone, FullL2NodeConfig: Config, { fn kind() -> NodeKind { diff --git a/src/config/rollup.rs b/src/config/rollup.rs index cc6066d..e2a1934 100644 --- a/src/config/rollup.rs +++ b/src/config/rollup.rs @@ -1,72 +1,3 @@ -use tempfile::TempDir; +use crate::config::{BitcoinServiceConfig, FullNodeConfig}; -use super::BitcoinConfig; -use crate::config::{ - BitcoinServiceConfig, FullNodeConfig, RollupPublicKeys, RpcConfig, StorageConfig, -}; pub type RollupConfig = FullNodeConfig; - -pub fn default_rollup_config() -> RollupConfig { - RollupConfig { - rpc: RpcConfig { - bind_host: "127.0.0.1".into(), - bind_port: 0, - max_connections: 100, - max_request_body_size: 10 * 1024 * 1024, - max_response_body_size: 10 * 1024 * 1024, - batch_requests_limit: 50, - enable_subscriptions: true, - max_subscriptions_per_connection: 100, - }, - storage: StorageConfig { - path: TempDir::new() - .expect("Failed to create temporary directory") - .into_path(), - db_max_open_files: None, - }, - runner: None, - da: BitcoinServiceConfig { - node_url: String::new(), - node_username: String::from("user"), - node_password: String::from("password"), - network: bitcoin::Network::Regtest, - da_private_key: None, - tx_backup_dir: TempDir::new() - .expect("Failed to create temporary directory") - .into_path() - .display() - .to_string(), - }, - public_keys: RollupPublicKeys { - sequencer_public_key: vec![ - 32, 64, 64, 227, 100, 193, 15, 43, 236, 156, 31, 229, 0, 161, 205, 76, 36, 124, - 137, 214, 80, 160, 30, 215, 232, 44, 171, 168, 103, 135, 124, 33, - ], - // private key [4, 95, 252, 129, 163, 193, 253, 179, 175, 19, 89, 219, 242, 209, 20, 176, 179, 239, 191, 127, 41, 204, 156, 93, 160, 18, 103, 170, 57, 210, 199, 141] - // Private Key (WIF): KwNDSCvKqZqFWLWN1cUzvMiJQ7ck6ZKqR6XBqVKyftPZtvmbE6YD - sequencer_da_pub_key: vec![ - 3, 136, 195, 18, 11, 187, 25, 37, 38, 109, 184, 237, 247, 208, 131, 219, 162, 70, - 35, 174, 234, 47, 239, 247, 60, 51, 174, 242, 247, 112, 186, 222, 30, - ], - // private key [117, 186, 249, 100, 208, 116, 89, 70, 0, 54, 110, 91, 17, 26, 29, 168, 248, 107, 46, 254, 45, 34, 218, 81, 200, 216, 33, 38, 160, 252, 172, 114] - // Private Key (WIF): L1AZdJXzDGGENBBPZGSL7dKJnwn5xSKqzszgK6CDwiBGThYQEVTo - prover_da_pub_key: vec![ - 2, 138, 232, 157, 214, 46, 7, 210, 235, 33, 105, 239, 71, 169, 105, 233, 239, 84, - 172, 112, 13, 54, 9, 206, 106, 138, 251, 218, 15, 28, 137, 112, 127, - ], - }, - } -} - -impl From for BitcoinServiceConfig { - fn from(v: BitcoinConfig) -> Self { - Self { - node_url: format!("127.0.0.1:{}", v.rpc_port), - 
node_username: v.rpc_user, - node_password: v.rpc_password, - network: v.network, - da_private_key: None, - tx_backup_dir: String::new(), - } - } -} diff --git a/src/config/test_case.rs b/src/config/test_case.rs index bc081a8..ea1784e 100644 --- a/src/config/test_case.rs +++ b/src/config/test_case.rs @@ -1,4 +1,4 @@ -use std::{path::PathBuf, time::Duration}; +use std::{env, path::PathBuf, time::Duration}; use tempfile::TempDir; @@ -43,7 +43,7 @@ impl TestCaseEnv { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TestCaseConfig { pub n_nodes: usize, pub with_sequencer: bool, @@ -52,7 +52,7 @@ pub struct TestCaseConfig { pub with_light_client_prover: bool, pub timeout: Duration, pub dir: PathBuf, - pub docker: bool, + pub docker: TestCaseDockerConfig, // Either a relative dir from workspace root, i.e. "./resources/genesis/devnet" // Or an absolute path. // Defaults to resources/genesis/bitcoin-regtest @@ -71,8 +71,35 @@ impl Default for TestCaseConfig { dir: TempDir::new() .expect("Failed to create temporary directory") .into_path(), - docker: std::env::var("USE_DOCKER").map_or(false, |v| v.parse().unwrap_or(false)), + docker: TestCaseDockerConfig::default(), genesis_dir: None, } } } + +#[derive(Clone, Debug)] +pub struct TestCaseDockerConfig { + pub bitcoin: bool, + pub citrea: bool, +} + +impl Default for TestCaseDockerConfig { + fn default() -> Self { + TestCaseDockerConfig { + bitcoin: parse_bool_env("TEST_BITCOIN_DOCKER"), + citrea: parse_bool_env("TEST_CITREA_DOCKER"), + } + } +} + +impl TestCaseDockerConfig { + pub fn enabled(&self) -> bool { + self.bitcoin || self.citrea + } +} + +pub fn parse_bool_env(key: &str) -> bool { + env::var(key) + .map(|v| &v == "1" || &v.to_lowercase() == "true") + .unwrap_or(false) +} diff --git a/src/config/utils.rs b/src/config/utils.rs index 49beb15..afc1cbe 100644 --- a/src/config/utils.rs +++ b/src/config/utils.rs @@ -1,11 +1,11 @@ -use std::path::Path; +use std::{fmt::Debug, path::Path}; use serde::Serialize; pub fn config_to_file(config: &C, path: &P) -> std::io::Result<()> where - C: Serialize, - P: AsRef, + C: Serialize + Debug, + P: AsRef + Debug, { let toml = toml::to_string(config).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; diff --git a/src/docker.rs b/src/docker.rs index 65f4c5c..f1ad1da 100644 --- a/src/docker.rs +++ b/src/docker.rs @@ -16,65 +16,69 @@ use bollard::{ Docker, }; use futures::StreamExt; -use tokio::{fs::File, io::AsyncWriteExt, task::JoinHandle}; -use tracing::{debug, info}; +use tokio::{fs::File, io::AsyncWriteExt, sync::Mutex, task::JoinHandle}; +use tracing::{debug, error, info}; + +use crate::node::NodeKind; use super::{config::DockerConfig, traits::SpawnOutput, utils::generate_test_id}; -use crate::traits::ContainerSpawnOutput; + +#[derive(Debug)] +pub struct ContainerSpawnOutput { + pub id: String, + pub ip: String, +} + +#[derive(Debug, Clone)] +pub struct NetworkInfo { + id: String, + name: String, +} pub struct DockerEnv { pub docker: Docker, - pub network_id: String, - pub network_name: String, + pub network_info: NetworkInfo, id: String, - volumes: HashSet, + volumes: Mutex>, + container_ids: Mutex>, } impl DockerEnv { - pub async fn new(n_nodes: usize) -> Result { + pub async fn new() -> Result { let docker = Docker::connect_with_local_defaults().context("Failed to connect to Docker")?; let test_id = generate_test_id(); - let (network_id, network_name) = Self::create_network(&docker, &test_id).await?; - let volumes = Self::create_volumes(&docker, &test_id, n_nodes).await?; + let 
network_info = Self::create_network(&docker, &test_id).await?; Ok(Self { docker, - network_id, - network_name, + network_info, id: test_id, - volumes, + volumes: Mutex::new(HashSet::new()), + container_ids: Mutex::new(HashSet::new()), }) } - async fn create_volumes( - docker: &Docker, - test_case_id: &str, - n_nodes: usize, - ) -> Result> { - let volume_configs = vec![("bitcoin", n_nodes)]; - let mut volumes = HashSet::new(); - - for (name, n) in volume_configs { - for i in 0..n { - let volume_name = format!("{name}-{i}-{test_case_id}"); - docker - .create_volume(CreateVolumeOptions { - name: volume_name.clone(), - driver: "local".to_string(), - driver_opts: HashMap::new(), - labels: HashMap::new(), - }) - .await?; - - volumes.insert(volume_name); - } - } + /// Create a volume per node + /// Keeps track of volumes for cleanup + async fn create_volume(&self, config: &DockerConfig) -> Result<()> { + let volume_name = format!("{}-{}", config.volume.name, self.id); + self.docker + .create_volume(CreateVolumeOptions { + name: volume_name.clone(), + driver: "local".to_string(), + driver_opts: HashMap::new(), + labels: HashMap::new(), + }) + .await?; + + self.volumes.lock().await.insert(volume_name); - Ok(volumes) + Ok(()) } - async fn create_network(docker: &Docker, test_case_id: &str) -> Result<(String, String)> { + /// Create a new test network and return its network, name and id + async fn create_network(docker: &Docker, test_case_id: &str) -> Result { let network_name = format!("test_network_{test_case_id}"); let options = CreateNetworkOptions { name: network_name.clone(), @@ -88,11 +92,22 @@ impl DockerEnv { .await? .id .context("Error getting network id")?; - Ok((id, network_name)) + + Ok(NetworkInfo { + id, + name: network_name, + }) + } + + pub fn get_hostname(&self, kind: &NodeKind) -> String { + format!("{kind}-{}", self.id) } pub async fn spawn(&self, config: DockerConfig) -> Result { debug!("Spawning docker with config {config:#?}"); + + self.create_volume(&config).await?; + let exposed_ports: HashMap> = config .ports .iter() @@ -114,24 +129,41 @@ impl DockerEnv { .collect(); let mut network_config = HashMap::new(); - network_config.insert(self.network_id.clone(), EndpointSettings::default()); + network_config.insert( + self.network_info.id.clone(), + EndpointSettings { + ip_address: Some(self.get_hostname(&config.kind)), + ..Default::default() + }, + ); let volume_name = format!("{}-{}", config.volume.name, self.id); - let mount = Mount { + let mut mounts = vec![Mount { target: Some(config.volume.target.clone()), source: Some(volume_name), typ: Some(MountTypeEnum::VOLUME), ..Default::default() - }; + }]; + + if let Some(host_dir) = &config.host_dir { + for dir in host_dir { + mounts.push(Mount { + target: Some(dir.clone()), + source: Some(dir.clone()), + typ: Some(MountTypeEnum::BIND), + ..Default::default() + }); + } + } let container_config = Config { + hostname: Some(format!("{}-{}", config.kind, self.id)), image: Some(config.image), cmd: Some(config.cmd), exposed_ports: Some(exposed_ports), host_config: Some(HostConfig { port_bindings: Some(port_bindings), - // binds: Some(vec![config.dir]), - mounts: Some(vec![mount]), + mounts: Some(mounts), ..Default::default() }), networking_config: Some(NetworkingConfig { @@ -153,6 +185,8 @@ impl DockerEnv { .await .map_err(|e| anyhow!("Failed to create Docker container {e}"))?; + self.container_ids.lock().await.insert(container.id.clone()); + self.docker .start_container::(&container.id, None) .await @@ -173,12 +207,19 @@ impl DockerEnv { 
// Extract container logs to host // This spawns a background task to continuously stream logs from the container. // The task will run until the container is stopped or removed during cleanup. - Self::extract_container_logs(self.docker.clone(), container.id.clone(), config.log_path); - - Ok(SpawnOutput::Container(ContainerSpawnOutput { + Self::extract_container_logs( + self.docker.clone(), + container.id.clone(), + config.log_path, + &config.kind, + ); + + let spawn_output = SpawnOutput::Container(ContainerSpawnOutput { id: container.id, ip: ip_address, - })) + }); + debug!("{}, spawn_output : {spawn_output:?}", config.kind); + Ok(spawn_output) } async fn ensure_image_exists(&self, image: &str) -> Result<()> { @@ -218,25 +259,29 @@ impl DockerEnv { } pub async fn cleanup(&self) -> Result<()> { + for id in self.container_ids.lock().await.iter() { + debug!("Logs for container {}:", id); + let _ = self.dump_logs_cli(id); + } + let containers = self.docker.list_containers::(None).await?; for container in containers { if let (Some(id), Some(networks)) = ( container.id, container.network_settings.and_then(|ns| ns.networks), ) { - if networks.contains_key(&self.network_name) { + if networks.contains_key(&self.network_info.name) { self.docker.stop_container(&id, None).await?; self.docker.remove_container(&id, None).await?; } } } - self.docker.remove_network(&self.network_name).await?; + self.docker.remove_network(&self.network_info.name).await?; - for volume_name in &self.volumes { + for volume_name in self.volumes.lock().await.iter() { self.docker.remove_volume(volume_name, None).await?; } - Ok(()) } @@ -244,7 +289,10 @@ impl DockerEnv { docker: Docker, container_id: String, log_path: PathBuf, + kind: &NodeKind, ) -> JoinHandle> { + info!("{} stdout logs available at : {}", kind, log_path.display()); + tokio::spawn(async move { if let Some(parent) = log_path.parent() { tokio::fs::create_dir_all(parent) @@ -277,4 +325,20 @@ impl DockerEnv { Ok(()) }) } + + fn dump_logs_cli(&self, container_id: &str) -> Result<()> { + let n_lines = std::env::var("TAIL_N_LINES").unwrap_or_else(|_| "25".to_string()); + + let output = std::process::Command::new("docker") + .args(&["logs", container_id, "-n", &n_lines]) + .output()?; + + debug!("{}", String::from_utf8_lossy(&output.stdout)); + + if !output.stderr.is_empty() { + error!("{}", String::from_utf8_lossy(&output.stderr)); + } + + Ok(()) + } } diff --git a/src/framework.rs b/src/framework.rs index 5e66d87..dc22b33 100644 --- a/src/framework.rs +++ b/src/framework.rs @@ -1,22 +1,38 @@ use std::{ future::Future, + path::{Path, PathBuf}, sync::{Arc, Once}, }; +use anyhow::Context; use bitcoincore_rpc::RpcApi; use tracing::{debug, info}; use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; use super::{ - bitcoin::BitcoinNodeCluster, config::TestConfig, docker::DockerEnv, full_node::FullNode, - node::NodeKind, sequencer::Sequencer, traits::NodeT, Result, + bitcoin::BitcoinNodeCluster, docker::DockerEnv, full_node::FullNode, node::NodeKind, + sequencer::Sequencer, traits::NodeT, Result, +}; +use super::{ + config::{ + BitcoinConfig, FullBatchProverConfig, FullFullNodeConfig, FullSequencerConfig, + RollupConfig, TestCaseConfig, TestConfig, + }, + utils::{copy_directory, get_available_port}, }; use crate::{ batch_prover::BatchProver, light_client_prover::LightClientProver, log_provider::{LogPathProvider, LogPathProviderErased}, + test_case::TestCase, utils::tail_file, }; +use crate::{ + config::{ + BitcoinServiceConfig, 
FullLightClientProverConfig, RpcConfig, RunnerConfig, StorageConfig, + }, + utils::{get_default_genesis_path, get_workspace_root}, +}; pub struct TestContext { pub config: TestConfig, @@ -24,12 +40,7 @@ pub struct TestContext { } impl TestContext { - async fn new(config: TestConfig) -> Self { - let docker = if config.test_case.docker { - Some(DockerEnv::new(config.test_case.n_nodes).await.unwrap()) - } else { - None - }; + async fn new(config: TestConfig, docker: Option) -> Self { Self { config, docker: Arc::new(docker), @@ -56,15 +67,23 @@ async fn create_optional(pred: bool, f: impl Future>) -> R } impl TestFramework { - pub async fn new(config: TestConfig) -> Result { + pub async fn new() -> Result { setup_logging(); + let test_case = T::test_config(); + let docker = if test_case.docker.enabled() { + Some(DockerEnv::new().await?) + } else { + None + }; + let config = Self::generate_test_config::(test_case, &docker).await?; + anyhow::ensure!( config.test_case.n_nodes > 0, "At least one bitcoin node has to be running" ); - let ctx = TestContext::new(config).await; + let ctx = TestContext::new(config, docker).await; let bitcoin_nodes = BitcoinNodeCluster::new(&ctx).await?; @@ -83,22 +102,25 @@ impl TestFramework { // Has to initialize sequencer first since provers and full node depend on it self.sequencer = create_optional( self.ctx.config.test_case.with_sequencer, - Sequencer::new(&self.ctx.config.sequencer), + Sequencer::new(&self.ctx.config.sequencer, Arc::clone(&self.ctx.docker)), ) .await?; (self.batch_prover, self.light_client_prover, self.full_node) = tokio::try_join!( create_optional( self.ctx.config.test_case.with_batch_prover, - BatchProver::new(&self.ctx.config.batch_prover) + BatchProver::new(&self.ctx.config.batch_prover, Arc::clone(&self.ctx.docker)) ), create_optional( self.ctx.config.test_case.with_light_client_prover, - LightClientProver::new(&self.ctx.config.light_client_prover) + LightClientProver::new( + &self.ctx.config.light_client_prover, + Arc::clone(&self.ctx.docker) + ) ), create_optional( self.ctx.config.test_case.with_full_node, - FullNode::new(&self.ctx.config.full_node) + FullNode::new(&self.ctx.config.full_node, Arc::clone(&self.ctx.docker)) ), )?; @@ -132,7 +154,7 @@ impl TestFramework { .collect() } - pub fn dump_log(&self) -> Result<()> { + pub fn dump_logs(&self) -> Result<()> { debug!("Dumping logs:"); let n_lines = std::env::var("TAIL_N_LINES") @@ -224,6 +246,247 @@ impl TestFramework { self.initial_da_height = da.get_block_count().await?; Ok(()) } + + async fn generate_test_config( + test_case: TestCaseConfig, + docker: &Option, + ) -> Result { + let env = T::test_env(); + let bitcoin = T::bitcoin_config(); + let batch_prover = T::batch_prover_config(); + let light_client_prover = T::light_client_prover_config(); + let sequencer = T::sequencer_config(); + let sequencer_rollup = RollupConfig::default(); + let batch_prover_rollup = RollupConfig::default(); + let light_client_prover_rollup = RollupConfig::default(); + let full_node_rollup = RollupConfig::default(); + + let [bitcoin_dir, dbs_dir, batch_prover_dir, light_client_prover_dir, sequencer_dir, full_node_dir, genesis_dir, tx_backup_dir] = + create_dirs(&test_case.dir)?; + + copy_genesis_dir(&test_case.genesis_dir, &genesis_dir)?; + + let mut bitcoin_confs = vec![]; + for i in 0..test_case.n_nodes { + let data_dir = bitcoin_dir.join(i.to_string()); + std::fs::create_dir_all(&data_dir) + .with_context(|| format!("Failed to create {} directory", data_dir.display()))?; + + let p2p_port = 
get_available_port()?; + let rpc_port = get_available_port()?; + + bitcoin_confs.push(BitcoinConfig { + p2p_port, + rpc_port, + data_dir, + env: env.bitcoin().clone(), + idx: i, + ..bitcoin.clone() + }); + } + + if let Some(docker) = docker.as_ref() { + bitcoin_confs[0].docker_ip = Some(docker.get_hostname(&NodeKind::Bitcoin)); + } + + // Target first bitcoin node as DA for now + let da_config: BitcoinServiceConfig = bitcoin_confs[0].clone().into(); + + let runner_bind_host = match docker.as_ref() { + Some(d) => d.get_hostname(&NodeKind::Sequencer), + None => sequencer_rollup.rpc.bind_host.clone(), + }; + + let bind_host = match docker.as_ref() { + Some(_) => "0.0.0.0".to_string(), + None => sequencer_rollup.rpc.bind_host.clone(), + }; + + let sequencer_rollup = { + let bind_port = get_available_port()?; + let node_kind = NodeKind::Sequencer.to_string(); + RollupConfig { + da: BitcoinServiceConfig { + da_private_key: Some( + "045FFC81A3C1FDB3AF1359DBF2D114B0B3EFBF7F29CC9C5DA01267AA39D2C78D" + .to_string(), + ), + node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), + tx_backup_dir: tx_backup_dir.display().to_string(), + ..da_config.clone() + }, + storage: StorageConfig { + path: dbs_dir.join(format!("{node_kind}-db")), + db_max_open_files: None, + }, + rpc: RpcConfig { + bind_port, + bind_host: bind_host.clone(), + ..sequencer_rollup.rpc + }, + ..sequencer_rollup + } + }; + + let runner_config = Some(RunnerConfig { + sequencer_client_url: format!( + "http://{}:{}", + runner_bind_host, sequencer_rollup.rpc.bind_port, + ), + include_tx_body: true, + accept_public_input_as_proven: Some(true), + sync_blocks_count: 10, + pruning_config: None, + }); + + let batch_prover_rollup = { + let bind_port = get_available_port()?; + let node_kind = NodeKind::BatchProver.to_string(); + RollupConfig { + da: BitcoinServiceConfig { + da_private_key: Some( + "75BAF964D074594600366E5B111A1DA8F86B2EFE2D22DA51C8D82126A0FCAC72" + .to_string(), + ), + node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), + tx_backup_dir: tx_backup_dir.display().to_string(), + ..da_config.clone() + }, + storage: StorageConfig { + path: dbs_dir.join(format!("{node_kind}-db")), + db_max_open_files: None, + }, + rpc: RpcConfig { + bind_port, + bind_host: bind_host.clone(), + ..batch_prover_rollup.rpc + }, + runner: runner_config.clone(), + ..batch_prover_rollup + } + }; + + let light_client_prover_rollup = { + let bind_port = get_available_port()?; + let node_kind = NodeKind::LightClientProver.to_string(); + RollupConfig { + da: BitcoinServiceConfig { + da_private_key: None, + node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), + tx_backup_dir: tx_backup_dir.display().to_string(), + ..da_config.clone() + }, + storage: StorageConfig { + path: dbs_dir.join(format!("{node_kind}-db")), + db_max_open_files: None, + }, + rpc: RpcConfig { + bind_port, + bind_host: bind_host.clone(), + ..light_client_prover_rollup.rpc + }, + runner: runner_config.clone(), + ..light_client_prover_rollup + } + }; + + let full_node_rollup = { + let bind_port = get_available_port()?; + let node_kind = NodeKind::FullNode.to_string(); + RollupConfig { + da: BitcoinServiceConfig { + node_url: format!( + "http://{}/wallet/{}", + da_config.node_url, + NodeKind::Bitcoin // Use default wallet + ), + tx_backup_dir: tx_backup_dir.display().to_string(), + ..da_config.clone() + }, + storage: StorageConfig { + path: dbs_dir.join(format!("{node_kind}-db")), + db_max_open_files: None, + }, + rpc: RpcConfig { + 
bind_port, + bind_host: bind_host.clone(), + ..full_node_rollup.rpc + }, + runner: runner_config.clone(), + ..full_node_rollup + } + }; + + Ok(TestConfig { + bitcoin: bitcoin_confs, + sequencer: FullSequencerConfig::new( + sequencer, + sequencer_rollup, + None, + sequencer_dir, + env.sequencer(), + )?, + batch_prover: FullBatchProverConfig::new( + batch_prover, + batch_prover_rollup, + None, + batch_prover_dir, + env.batch_prover(), + )?, + light_client_prover: FullLightClientProverConfig::new( + light_client_prover, + light_client_prover_rollup, + None, + light_client_prover_dir, + env.light_client_prover(), + )?, + full_node: FullFullNodeConfig::new( + (), + full_node_rollup, + None, + full_node_dir, + env.full_node(), + )?, + test_case, + }) + } +} + +fn create_dirs(base_dir: &Path) -> Result<[PathBuf; 8]> { + let paths = [ + NodeKind::Bitcoin.to_string(), + "dbs".to_string(), + NodeKind::BatchProver.to_string(), + NodeKind::LightClientProver.to_string(), + NodeKind::Sequencer.to_string(), + NodeKind::FullNode.to_string(), + "genesis".to_string(), + "inscription_txs".to_string(), + ] + .map(|dir| base_dir.join(dir)); + + for path in &paths { + std::fs::create_dir_all(path) + .with_context(|| format!("Failed to create {} directory", path.display()))?; + } + + Ok(paths) +} + +fn copy_genesis_dir(genesis_dir: &Option, target_dir: &Path) -> std::io::Result<()> { + let genesis_dir = + genesis_dir + .as_ref() + .map(PathBuf::from) + .map_or_else(get_default_genesis_path, |dir| { + if dir.is_absolute() { + dir + } else { + get_workspace_root().join(dir) + } + }); + + copy_directory(genesis_dir, target_dir) } static INIT: Once = Once::new(); diff --git a/src/node.rs b/src/node.rs index a11d76c..aebb05f 100644 --- a/src/node.rs +++ b/src/node.rs @@ -1,8 +1,9 @@ use std::{ - fmt, + fmt::{self, Debug}, fs::File, path::PathBuf, process::Stdio, + sync::Arc, time::{Duration, SystemTime}, }; @@ -13,18 +14,19 @@ use tokio::{ process::Command, time::{sleep, Instant}, }; -use tracing::{info, trace}; +use tracing::{debug, info, trace}; use crate::{ client::Client, - config::{config_to_file, RollupConfig}, + config::{DaLayer, DockerConfig, RollupConfig}, + docker::DockerEnv, log_provider::LogPathProvider, traits::{NodeT, Restart, SpawnOutput}, utils::{get_citrea_path, get_genesis_path}, Result, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, Hash, PartialEq)] pub enum NodeKind { Bitcoin, BatchProver, @@ -33,6 +35,18 @@ pub enum NodeKind { FullNode, } +impl NodeKind { + pub fn to_u8(&self) -> u8 { + match self { + NodeKind::Bitcoin => 1, + NodeKind::BatchProver => 2, + NodeKind::LightClientProver => 3, + NodeKind::Sequencer => 4, + NodeKind::FullNode => 5, + } + } +} + impl fmt::Display for NodeKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -55,17 +69,27 @@ pub trait Config: Clone { fn node_config(&self) -> Option<&Self::NodeConfig>; fn node_kind() -> NodeKind; fn rollup_config(&self) -> &RollupConfig; + fn da_layer(&self) -> DaLayer; + + // Get node config path argument and path. 
+ // Not required for `full-node` + fn get_node_config_args(&self) -> Option>; + fn get_rollup_config_args(&self) -> Vec; } -pub struct Node { +pub struct Node { spawn_output: SpawnOutput, config: C, pub client: Client, } -impl Node { - pub async fn new(config: &C) -> Result { - let spawn_output = Self::spawn(config)?; +impl Node +where + C: Config + LogPathProvider + Send + Sync + Debug, + DockerConfig: From, +{ + pub async fn new(config: &C, docker: Arc>) -> Result { + let spawn_output = ::spawn(config, &docker).await?; let client = Client::new(config.rpc_bind_host(), config.rpc_bind_port())?; Ok(Self { @@ -75,32 +99,13 @@ impl Node { }) } - fn get_node_config_args(config: &C) -> Result> { - let dir = config.dir(); - let kind = C::node_kind(); - - config.node_config().map_or(Ok(Vec::new()), |node_config| { - let config_path = dir.join(format!("{kind}_config.toml")); - config_to_file(node_config, &config_path) - .with_context(|| format!("Error writing {kind} config to file"))?; - - let node_kind_str = match &kind { - NodeKind::BatchProver | NodeKind::LightClientProver => "prover".to_string(), - kind => kind.to_string(), - }; - Ok(vec![ - format!("--{node_kind_str}-config-path"), - config_path.display().to_string(), - ]) - }) - } - fn spawn(config: &C) -> Result { let citrea = get_citrea_path()?; - let dir = config.dir(); let kind = C::node_kind(); + debug!("Spawning {kind} with config {config:?}"); + let stdout_path = config.log_path(); let stdout_file = File::create(&stdout_path).context("Failed to create stdout file")?; info!( @@ -112,22 +117,8 @@ impl Node { let stderr_path = config.stderr_path(); let stderr_file = File::create(stderr_path).context("Failed to create stderr file")?; - // Handle full node not having any node config - let node_config_args = Self::get_node_config_args(config)?; - - let rollup_config_path = dir.join(format!("{kind}_rollup_config.toml")); - config_to_file(&config.rollup_config(), &rollup_config_path)?; - Command::new(citrea) - .arg("--da-layer") - .arg("bitcoin") - .arg("--rollup-config-path") - .arg(rollup_config_path) - .args(node_config_args) - .arg("--genesis-paths") - .arg(get_genesis_path( - dir.parent().expect("Couldn't get parent dir"), - )) + .args(get_citrea_args(config)) .envs(config.env()) .stdout(Stdio::from(stdout_file)) .stderr(Stdio::from(stderr_file)) @@ -165,13 +156,17 @@ impl Node { #[async_trait] impl NodeT for Node where - C: Config + LogPathProvider + Send + Sync, + C: Config + LogPathProvider + Send + Sync + Debug, + DockerConfig: From, { type Config = C; type Client = Client; - fn spawn(config: &Self::Config) -> Result { - Self::spawn(config) + async fn spawn(config: &Self::Config, docker: &Arc>) -> Result { + match docker.as_ref() { + Some(docker) => docker.spawn(config.to_owned().into()).await, + None => Self::spawn(config), + } } fn spawn_output(&mut self) -> &mut SpawnOutput { @@ -218,7 +213,8 @@ where #[async_trait] impl Restart for Node where - C: Config + LogPathProvider + Send + Sync, + C: Config + LogPathProvider + Send + Sync + Debug, + DockerConfig: From, { async fn wait_until_stopped(&mut self) -> Result<()> { self.stop().await?; @@ -238,3 +234,19 @@ where self.wait_for_ready(None).await } } + +pub fn get_citrea_args(config: &C) -> Vec +where + C: Config, +{ + let node_config_args = config.get_node_config_args().unwrap_or_default(); + let rollup_config_args = config.get_rollup_config_args(); + + vec![ + vec!["--da-layer".to_string(), config.da_layer().to_string()], + node_config_args, + rollup_config_args, + 
vec!["--genesis-paths".to_string(), get_genesis_path(config)], + ] + .concat() +} diff --git a/src/test_case.rs b/src/test_case.rs index dc04c67..d9401a4 100644 --- a/src/test_case.rs +++ b/src/test_case.rs @@ -1,33 +1,20 @@ //! This module provides the `TestCaseRunner` and `TestCase` trait for running and defining test cases. //! It handles setup, execution, and cleanup of test environments. -use std::{ - panic::{self}, - path::{Path, PathBuf}, - time::Duration, -}; +use std::{panic, time::Duration}; use anyhow::{bail, Context}; use async_trait::async_trait; use futures::FutureExt; use super::{ - config::{ - default_rollup_config, BitcoinConfig, FullBatchProverConfig, FullFullNodeConfig, - FullSequencerConfig, RollupConfig, TestCaseConfig, TestCaseEnv, TestConfig, - }, + config::{BitcoinConfig, TestCaseConfig, TestCaseEnv}, framework::TestFramework, - node::NodeKind, - utils::{copy_directory, get_available_port}, Result, }; use crate::{ - config::{ - BatchProverConfig, BitcoinServiceConfig, FullLightClientProverConfig, - LightClientProverConfig, RpcConfig, RunnerConfig, SequencerConfig, StorageConfig, - }, + config::{BatchProverConfig, LightClientProverConfig, SequencerConfig}, traits::NodeT, - utils::{get_default_genesis_path, get_workspace_root}, }; // TestCaseRunner manages the lifecycle of a test case, including setup, execution, and cleanup. @@ -84,7 +71,7 @@ impl TestCaseRunner { pub async fn run(mut self) -> Result<()> { let mut framework = None; let result = panic::AssertUnwindSafe(async { - framework = Some(TestFramework::new(Self::generate_test_config()?).await?); + framework = Some(TestFramework::new::().await?); let f = framework.as_mut().unwrap(); self.run_test_case(f).await }) @@ -96,7 +83,7 @@ impl TestCaseRunner { .with_context(|| format!("Framework not correctly initialized, result {result:?}"))?; if let Err(_) | Ok(Err(_)) = result { - if let Err(e) = f.dump_log() { + if let Err(e) = f.dump_logs() { eprintln!("Error dumping log: {e}"); } } @@ -117,190 +104,6 @@ impl TestCaseRunner { } } } - - fn generate_test_config() -> Result { - let test_case = T::test_config(); - let env = T::test_env(); - let bitcoin = T::bitcoin_config(); - let batch_prover = T::batch_prover_config(); - let light_client_prover = T::light_client_prover_config(); - let sequencer = T::sequencer_config(); - let sequencer_rollup = default_rollup_config(); - let batch_prover_rollup = default_rollup_config(); - let light_client_prover_rollup = default_rollup_config(); - let full_node_rollup = default_rollup_config(); - - let [bitcoin_dir, dbs_dir, batch_prover_dir, light_client_prover_dir, sequencer_dir, full_node_dir, genesis_dir, tx_backup_dir] = - create_dirs(&test_case.dir)?; - - copy_genesis_dir(&test_case.genesis_dir, &genesis_dir)?; - - let mut bitcoin_confs = vec![]; - for i in 0..test_case.n_nodes { - let data_dir = bitcoin_dir.join(i.to_string()); - std::fs::create_dir_all(&data_dir) - .with_context(|| format!("Failed to create {} directory", data_dir.display()))?; - - let p2p_port = get_available_port()?; - let rpc_port = get_available_port()?; - - bitcoin_confs.push(BitcoinConfig { - p2p_port, - rpc_port, - data_dir, - env: env.bitcoin().clone(), - idx: i, - ..bitcoin.clone() - }); - } - - // Target first bitcoin node as DA for now - let da_config: BitcoinServiceConfig = bitcoin_confs[0].clone().into(); - - let sequencer_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::Sequencer.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - da_private_key: Some( - 
"045FFC81A3C1FDB3AF1359DBF2D114B0B3EFBF7F29CC9C5DA01267AA39D2C78D" - .to_string(), - ), - node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), - tx_backup_dir: tx_backup_dir.display().to_string(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{node_kind}-db")), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..sequencer_rollup.rpc - }, - ..sequencer_rollup - } - }; - - let runner_config = Some(RunnerConfig { - sequencer_client_url: format!( - "http://{}:{}", - sequencer_rollup.rpc.bind_host, sequencer_rollup.rpc.bind_port, - ), - include_tx_body: true, - accept_public_input_as_proven: Some(true), - sync_blocks_count: 10, - pruning_config: None, - }); - - let batch_prover_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::BatchProver.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - da_private_key: Some( - "75BAF964D074594600366E5B111A1DA8F86B2EFE2D22DA51C8D82126A0FCAC72" - .to_string(), - ), - node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), - tx_backup_dir: tx_backup_dir.display().to_string(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{node_kind}-db")), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..batch_prover_rollup.rpc - }, - runner: runner_config.clone(), - ..batch_prover_rollup - } - }; - - let light_client_prover_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::LightClientProver.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - da_private_key: None, - node_url: format!("http://{}/wallet/{}", da_config.node_url, node_kind), - tx_backup_dir: tx_backup_dir.display().to_string(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{node_kind}-db")), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..light_client_prover_rollup.rpc - }, - runner: runner_config.clone(), - ..light_client_prover_rollup - } - }; - - let full_node_rollup = { - let bind_port = get_available_port()?; - let node_kind = NodeKind::FullNode.to_string(); - RollupConfig { - da: BitcoinServiceConfig { - node_url: format!( - "http://{}/wallet/{}", - da_config.node_url, - NodeKind::Bitcoin // Use default wallet - ), - tx_backup_dir: tx_backup_dir.display().to_string(), - ..da_config.clone() - }, - storage: StorageConfig { - path: dbs_dir.join(format!("{node_kind}-db")), - db_max_open_files: None, - }, - rpc: RpcConfig { - bind_port, - ..full_node_rollup.rpc - }, - runner: runner_config.clone(), - ..full_node_rollup - } - }; - - Ok(TestConfig { - bitcoin: bitcoin_confs, - sequencer: FullSequencerConfig { - rollup: sequencer_rollup, - dir: sequencer_dir, - docker_image: None, - node: sequencer, - env: env.sequencer(), - }, - batch_prover: FullBatchProverConfig { - rollup: batch_prover_rollup, - dir: batch_prover_dir, - docker_image: None, - node: batch_prover, - env: env.batch_prover(), - }, - light_client_prover: FullLightClientProverConfig { - rollup: light_client_prover_rollup, - dir: light_client_prover_dir, - docker_image: None, - node: light_client_prover, - env: env.light_client_prover(), - }, - full_node: FullFullNodeConfig { - rollup: full_node_rollup, - dir: full_node_dir, - docker_image: None, - node: (), - env: env.full_node(), - }, - test_case, - }) - } } /// Defines the interface for implementing test cases. 
@@ -365,40 +168,3 @@ pub trait TestCase: Send + Sync + 'static { Ok(()) } } - -fn create_dirs(base_dir: &Path) -> Result<[PathBuf; 8]> { - let paths = [ - NodeKind::Bitcoin.to_string(), - "dbs".to_string(), - NodeKind::BatchProver.to_string(), - NodeKind::LightClientProver.to_string(), - NodeKind::Sequencer.to_string(), - NodeKind::FullNode.to_string(), - "genesis".to_string(), - "inscription_txs".to_string(), - ] - .map(|dir| base_dir.join(dir)); - - for path in &paths { - std::fs::create_dir_all(path) - .with_context(|| format!("Failed to create {} directory", path.display()))?; - } - - Ok(paths) -} - -fn copy_genesis_dir(genesis_dir: &Option, target_dir: &Path) -> std::io::Result<()> { - let genesis_dir = - genesis_dir - .as_ref() - .map(PathBuf::from) - .map_or_else(get_default_genesis_path, |dir| { - if dir.is_absolute() { - dir - } else { - get_workspace_root().join(dir) - } - }); - - copy_directory(genesis_dir, target_dir) -} diff --git a/src/traits.rs b/src/traits.rs index c5fff5f..819e6b4 100644 --- a/src/traits.rs +++ b/src/traits.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use anyhow::Context; use async_trait::async_trait; @@ -6,13 +6,9 @@ use bollard::{container::StopContainerOptions, Docker}; use tokio::process::Child; use tracing::info; -use super::Result; +use crate::docker::{ContainerSpawnOutput, DockerEnv}; -#[derive(Debug)] -pub struct ContainerSpawnOutput { - pub id: String, - pub ip: String, -} +use super::Result; #[derive(Debug)] pub enum SpawnOutput { @@ -28,7 +24,7 @@ pub trait NodeT: Send { type Client; /// Spawn a new node with specific config and return its child - fn spawn(test_config: &Self::Config) -> Result; + async fn spawn(config: &Self::Config, docker: &Arc>) -> Result; fn spawn_output(&mut self) -> &mut SpawnOutput; fn config_mut(&mut self) -> &mut Self::Config; diff --git a/src/utils.rs b/src/utils.rs index e913c9d..12b11c1 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,15 +1,14 @@ use std::{ - fs, - fs::File, - io, - io::{BufRead, BufReader}, + fs::{self, File}, + io::{self, BufRead, BufReader}, net::TcpListener, path::{Path, PathBuf}, }; use anyhow::anyhow; use rand::{distributions::Alphanumeric, thread_rng, Rng}; -use tracing::debug; + +use crate::node::Config; use super::Result; @@ -44,8 +43,14 @@ pub fn get_default_genesis_path() -> PathBuf { path } -pub fn get_genesis_path(dir: &Path) -> PathBuf { - dir.join("genesis") +pub fn get_genesis_path(config: &impl Config) -> String { + config + .dir() + .parent() + .expect("Couldn't get parent dir") + .join("genesis") + .display() + .to_string() } pub fn generate_test_id() -> String { @@ -82,6 +87,7 @@ pub fn copy_directory(src: impl AsRef, dst: impl AsRef) -> io::Resul } pub fn tail_file(path: &Path, lines: usize) -> Result<()> { + println!("tailing path : {:?}", path); let file = File::open(path)?; let reader = BufReader::new(file); let mut last_lines = Vec::with_capacity(lines); @@ -95,7 +101,7 @@ pub fn tail_file(path: &Path, lines: usize) -> Result<()> { } for line in last_lines { - debug!("{line}"); + println!("{line}"); } Ok(()) diff --git a/tests/mod.rs b/tests/mod.rs new file mode 100644 index 0000000..d66627e --- /dev/null +++ b/tests/mod.rs @@ -0,0 +1,72 @@ +use async_trait::async_trait; +use bitcoincore_rpc::RpcApi; +use citrea_e2e::bitcoin::FINALITY_DEPTH; +use citrea_e2e::config::{SequencerConfig, TestCaseConfig, TestCaseDockerConfig}; +use citrea_e2e::framework::TestFramework; +use citrea_e2e::test_case::{TestCase, TestCaseRunner}; +use 
citrea_e2e::Result;
+
+struct DockerIntegrationTest;
+
+#[async_trait]
+impl TestCase for DockerIntegrationTest {
+    fn test_config() -> TestCaseConfig {
+        TestCaseConfig {
+            with_batch_prover: true,
+            with_full_node: true,
+            docker: TestCaseDockerConfig {
+                bitcoin: true,
+                citrea: true,
+            },
+            ..Default::default()
+        }
+    }
+
+    fn sequencer_config() -> SequencerConfig {
+        SequencerConfig {
+            min_soft_confirmations_per_commitment: 10,
+            ..Default::default()
+        }
+    }
+
+    async fn run_test(&mut self, f: &mut TestFramework) -> Result<()> {
+        let sequencer = f.sequencer.as_ref().unwrap();
+        let batch_prover = f.batch_prover.as_ref().unwrap();
+        let full_node = f.full_node.as_ref().unwrap();
+        let da = f.bitcoin_nodes.get(0).unwrap();
+
+        let min_soft_confirmations_per_commitment =
+            sequencer.min_soft_confirmations_per_commitment();
+
+        for _ in 0..min_soft_confirmations_per_commitment {
+            sequencer.client.send_publish_batch_request().await?;
+        }
+
+        da.generate(FINALITY_DEPTH, None).await?;
+
+        // Wait for blob inscribe tx to be in mempool
+        da.wait_mempool_len(1, None).await?;
+
+        da.generate(FINALITY_DEPTH, None).await?;
+        let finalized_height = da.get_finalized_height().await?;
+        batch_prover
+            .wait_for_l1_height(finalized_height, None)
+            .await?;
+
+        let finalized_height = da.get_finalized_height().await?;
+        da.generate(FINALITY_DEPTH, None).await?;
+
+        let commitments = full_node
+            .wait_for_sequencer_commitments(finalized_height, None)
+            .await?;
+
+        assert_eq!(commitments.len(), 1);
+
+        Ok(())
+    }
+}
+
+#[tokio::test]
+async fn test_docker_integration() -> Result<()> {
+    TestCaseRunner::new(DockerIntegrationTest).run().await
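On the DA wiring used by this test: when bitcoind runs in a container, generate_test_config sets docker_ip on the first BitcoinConfig, so the From<BitcoinConfig> for BitcoinServiceConfig conversion points the rollup nodes at the container hostname instead of 127.0.0.1. A rough sketch of that behaviour, assuming both types are re-exported from citrea_e2e::config and using a hypothetical container hostname:

    use citrea_e2e::config::{BitcoinConfig, BitcoinServiceConfig};

    #[test]
    fn da_node_url_uses_container_hostname() {
        // docker_ip is normally filled in by the framework from DockerEnv::get_hostname;
        // the hostname below is hypothetical.
        let config = BitcoinConfig {
            docker_ip: Some("bitcoin-d6a41748".to_string()),
            ..BitcoinConfig::default()
        };
        let da: BitcoinServiceConfig = config.clone().into();
        assert_eq!(da.node_url, format!("bitcoin-d6a41748:{}", config.rpc_port));
        // Without docker_ip the conversion falls back to "127.0.0.1:<rpc_port>".
    }

The docker-backed integration test above can then be exercised with plain `cargo test`, matching the new CI job, provided a Docker daemon is available.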