From 38b528ffea1955d0e2e4b4d25529a9ac127bd295 Mon Sep 17 00:00:00 2001
From: Tomasz Polaczyk
Date: Thu, 19 Sep 2024 18:25:11 +0200
Subject: [PATCH] Cleanup

---
 client/consensus/src/mocks.rs                 |  17 +--
 client/service-container-chain/src/cli.rs     |   4 +-
 client/service-container-chain/src/service.rs |   3 +-
 node/src/cli.rs                               |  13 +-
 node/src/command.rs                           |  42 +-----
 node/src/command/solochain.rs                 |  87 ++++++-----
 node/src/service.rs                           | 136 +++++++++---------
 7 files changed, 133 insertions(+), 169 deletions(-)

diff --git a/client/consensus/src/mocks.rs b/client/consensus/src/mocks.rs
index 9cd96a45b..13f2955ed 100644
--- a/client/consensus/src/mocks.rs
+++ b/client/consensus/src/mocks.rs
@@ -15,6 +15,7 @@
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
 use {
+    crate::collators::lookahead::BuyCoreParams,
     crate::{
         collators::lookahead::Params as LookAheadParams, OrchestratorAuraWorkerAuxData,
         SlotFrequency,
@@ -23,7 +24,10 @@ use {
     cumulus_client_collator::service::CollatorService,
     cumulus_client_consensus_common::{ParachainBlockImportMarker, ValidationCodeHashProvider},
     cumulus_client_consensus_proposer::Proposer as ConsensusProposer,
-    cumulus_primitives_core::{relay_chain::BlockId, CollationInfo, CollectCollationInfo, ParaId},
+    cumulus_primitives_core::{
+        relay_chain::{BlockId, ValidationCodeHash},
+        CollationInfo, CollectCollationInfo, ParaId,
+    },
     cumulus_relay_chain_interface::{
         CommittedCandidateReceipt, OverseerHandle, RelayChainInterface, RelayChainResult,
         StorageValue,
@@ -35,7 +39,10 @@ use {
     pallet_xcm_core_buyer_runtime_api::BuyingError,
     parity_scale_codec::Encode,
     polkadot_core_primitives::{Header as PHeader, InboundDownwardMessage, InboundHrmpMessage},
-    polkadot_node_subsystem::messages::{RuntimeApiMessage, RuntimeApiRequest},
+    polkadot_node_subsystem::{
+        messages::{RuntimeApiMessage, RuntimeApiRequest},
+        overseer, OverseerSignal,
+    },
     polkadot_overseer::dummy::dummy_overseer_builder,
     polkadot_parachain_primitives::primitives::HeadData,
     polkadot_primitives::{
@@ -512,12 +519,6 @@ impl<B: BlockT> sc_consensus::Verifier<B> for SealExtractorVerfier {
     }
 }
 
-use crate::collators::lookahead::BuyCoreParams;
-use {
-    cumulus_primitives_core::relay_chain::ValidationCodeHash,
-    polkadot_node_subsystem::{overseer, OverseerSignal},
-};
-
 pub struct DummyCodeHashProvider;
 impl ValidationCodeHashProvider<PHash> for DummyCodeHashProvider {
     fn code_hash_at(&self, _at: PHash) -> Option<ValidationCodeHash> {
diff --git a/client/service-container-chain/src/cli.rs b/client/service-container-chain/src/cli.rs
index 4c9ff727f..79bd41a89 100644
--- a/client/service-container-chain/src/cli.rs
+++ b/client/service-container-chain/src/cli.rs
@@ -14,17 +14,17 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>
 
-use sc_cli::CliConfiguration;
-use url::Url;
 use {
     crate::chain_spec::RawGenesisConfig,
     cumulus_client_cli::{CollatorOptions, RelayChainMode},
     dc_orchestrator_chain_interface::ContainerChainGenesisData,
     dp_container_chain_genesis_data::json::properties_to_map,
     sc_chain_spec::ChainSpec,
+    sc_cli::CliConfiguration,
     sc_network::config::MultiaddrWithPeerId,
     sp_runtime::Storage,
     std::{collections::BTreeMap, net::SocketAddr},
+    url::Url,
 };
 
 /// The `run` command used to run a container chain node.
diff --git a/client/service-container-chain/src/service.rs b/client/service-container-chain/src/service.rs
index 07111fc23..d3c8dfa9f 100644
--- a/client/service-container-chain/src/service.rs
+++ b/client/service-container-chain/src/service.rs
@@ -52,7 +52,7 @@ use {
     substrate_prometheus_endpoint::Registry,
     tc_consensus::{
         collators::lookahead::{
-            self as lookahead_tanssi_aura, Params as LookaheadTanssiAuraParams,
+            self as lookahead_tanssi_aura, BuyCoreParams, Params as LookaheadTanssiAuraParams,
         },
         OrchestratorAuraWorkerAuxData,
     },
@@ -61,7 +61,6 @@ use {
 
 #[allow(deprecated)]
 use sc_executor::NativeElseWasmExecutor;
-use tc_consensus::collators::lookahead::BuyCoreParams;
 
 type FullBackend = TFullBackend<Block>;
diff --git a/node/src/cli.rs b/node/src/cli.rs
index ebb1d8c07..9208a3b3f 100644
--- a/node/src/cli.rs
+++ b/node/src/cli.rs
@@ -14,11 +14,11 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
-use tc_service_container_chain::cli::ContainerChainRunCmd;
 use {
     node_common::service::Sealing,
     sc_cli::{CliConfiguration, NodeKeyParams, SharedParams},
     std::path::PathBuf,
+    tc_service_container_chain::cli::ContainerChainRunCmd,
 };
 
 /// Sub-commands supported by the collator.
@@ -86,17 +86,6 @@ pub struct SoloChainCmd {
     #[arg(long)]
     pub no_hardware_benchmarks: bool,
 
-    /*
-    /// Enable the development service to run without a backing relay chain
-    #[arg(long)]
-    pub dev_service: bool,
-
-    /// When blocks should be sealed in the dev service.
-    ///
-    /// Options are "instant", "manual", or timer interval in milliseconds
-    #[arg(long, default_value = "instant")]
-    pub sealing: Sealing,
-    */
     /// Relay chain arguments
     #[arg(raw = true)]
     pub relay_chain_args: Vec<String>,
diff --git a/node/src/command.rs b/node/src/command.rs
index 419fcefec..50f287ec6 100644
--- a/node/src/command.rs
+++ b/node/src/command.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Tanssi. If not, see <http://www.gnu.org/licenses/>.
 
+use crate::command::solochain::relay_chain_cli_new;
 use {
     crate::{
         chain_spec,
@@ -343,8 +344,8 @@ pub fn run() -> Result<()> {
         Some(Subcommand::SoloChain(cmd)) => {
            // Cannot use create_configuration function because that needs a chain spec.
            // So write our own `create_runner` function that doesn't need chain spec.
-            let normalized_run = cmd.run.normalize();
-            let runner = solochain::create_runner(&cli, &normalized_run)?;
+            let container_chain_cli = cmd.run.normalize();
+            let runner = solochain::create_runner(&cli, &container_chain_cli)?;
 
             // TODO: Assert that there are no flags between `tanssi-node` and `solo-chain`.
             // These will be ignored anyway.
@@ -359,56 +360,23 @@ pub fn run() -> Result<()> {
             let collator_options = cmd.run.collator_options();
 
             runner.run_node_until_exit(|config| async move {
-                /*
-                let polkadot_cli = RelayChainCli::new(
+                let polkadot_cli = relay_chain_cli_new(
                     &config,
                     [RelayChainCli::executable_name()]
                         .iter()
                         .chain(cmd.relay_chain_args.iter()),
                 );
-                */
-                // TODO: refactor this into function that returns `RelayChainCli`
-                let binding = [RelayChainCli::executable_name()];
-                let relay_chain_args = binding.iter().chain(cmd.relay_chain_args.iter());
-                let polkadot_cli = {
-                    let base_path = config.base_path.path().join("polkadot");
-
-                    RelayChainCli {
-                        base_path,
-                        chain_id: Some(config.relay_chain.clone()),
-                        base: clap::Parser::parse_from(relay_chain_args),
-                    }
-                };
-
-                // TODO: dev mode does not make sense for starlight collator?
-                // But better to detect --dev flag and panic than ignore it
-                /*
-                let dev_service = config.chain_spec.is_dev()
-                    || relay_chain_id == Some("dev-service".to_string())
-                    || cli_run_dev_service;
-
-                if dev_service {
-                    return crate::service::start_dev_node(config, cli_run_sealing, hwbench, id)
-                        .map_err(Into::into);
-                }
-                */
-
                 let tokio_handle = config.tokio_handle.clone();
                 let polkadot_config =
                     SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, tokio_handle)
                         .map_err(|err| format!("Relay chain argument error: {}", err))?;
 
-                // We need to bake in some container-chain args
-                let container_chain_cli = normalized_run;
-                let tokio_handle = config.tokio_handle.clone();
-                let container_chain_config = (container_chain_cli, tokio_handle);
-
                 // TODO: we can't enable hwbench because we don't have a db. Find a workaround
                 let hwbench = None;
 
                 crate::service::start_solochain_node(
                     polkadot_config,
-                    container_chain_config,
+                    container_chain_cli,
                     collator_options,
                     hwbench,
                 )
diff --git a/node/src/command/solochain.rs b/node/src/command/solochain.rs
index 19ea16a33..1a56e7ea6 100644
--- a/node/src/command/solochain.rs
+++ b/node/src/command/solochain.rs
@@ -16,7 +16,7 @@
 
 //! Helper functions used to implement solochain collator
 
-use crate::cli::Cli;
+use crate::cli::{Cli, RelayChainCli};
 use futures::FutureExt;
 use jsonrpsee::server::BatchRequestConfig;
 use log::{info, warn};
@@ -249,6 +249,20 @@ fn init_cmd<C: CliConfiguration<DVC>, DVC: DefaultConfigurationValues>(
     Ok(())
 }
 
+/// Equivalent to [RelayChainCli::new]
+pub fn relay_chain_cli_new<'a>(
+    config: &SolochainConfig,
+    relay_chain_args: impl Iterator<Item = &'a String>,
+) -> RelayChainCli {
+    let base_path = config.base_path.path().join("polkadot");
+
+    RelayChainCli {
+        base_path,
+        chain_id: Some(config.relay_chain.clone()),
+        base: clap::Parser::parse_from(relay_chain_args),
+    }
+}
+
 /// Create a dummy [Configuration] that should only be used as input to polkadot-sdk functions that
 /// take this struct as input but only use one field of it.
 /// This is needed because [Configuration] does not implement [Default].
@@ -333,12 +347,11 @@ pub fn dummy_config(tokio_handle: tokio::runtime::Handle, base_path: BasePath) -> Configuration {
 
 /// Returns the default path for configuration directory based on the chain_spec
 pub(crate) fn build_solochain_config_dir(base_path: &PathBuf) -> PathBuf {
-    // Original: Collator1000-01/chains/dancebox/
-    //base_path.path().join("chains").join(chain_id)
-    // Starlight: Collator1000-01/config/
+    // base_path: Collator1000-01/data/containers
+    // config_dir: Collator1000-01/data/config
     let mut base_path = base_path.clone();
-    // Remove "/containers"
     base_path.pop();
+
     base_path.join("config")
 }
 
@@ -365,46 +378,44 @@ fn zombienet_keystore_path(keystore: &KeystoreConfig) -> PathBuf {
 
 /// When running under zombienet, collator keys are injected in a different folder from what we
 /// expect. This function will check if the zombienet folder exists, and if so, copy all the keys
 /// from there into the expected folder.
-pub fn copy_zombienet_keystore(keystore: &KeystoreConfig) {
-    // TODO: error handling? Or assume keystore_path always exists?
+pub fn copy_zombienet_keystore(keystore: &KeystoreConfig) -> std::io::Result<()> {
     let keystore_path = keystore.path().unwrap();
     let zombienet_path = zombienet_keystore_path(keystore);
 
     if zombienet_path.exists() {
         // Copy to keystore folder
-
-        // https://stackoverflow.com/a/65192210
-        // TODO: use a crate instead
-        // TODO: never overwrite files, only copy those that don't exist
-        fn copy_dir_all(
-            src: impl AsRef<Path>,
-            dst: impl AsRef<Path>,
-            files_copied: &mut u32,
-        ) -> std::io::Result<()> {
-            use std::fs;
-            fs::create_dir_all(&dst)?;
-            for entry in fs::read_dir(src)? {
-                let entry = entry?;
-                let ty = entry.file_type()?;
-                if ty.is_dir() {
-                    copy_dir_all(
-                        entry.path(),
-                        dst.as_ref().join(entry.file_name()),
-                        files_copied,
-                    )?;
-                } else {
-                    fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
-                    *files_copied += 1;
-                }
-            }
-            Ok(())
-        }
-
         let mut files_copied = 0;
-        copy_dir_all(zombienet_path, keystore_path, &mut files_copied).unwrap();
+        copy_dir_all(zombienet_path, keystore_path, &mut files_copied)?;
 
         log::info!("Copied {} keys from zombienet keystore", files_copied);
+
+        Ok(())
     } else {
-        // TODO: remove this log before merging
-        log::warn!("Copy nimbus keys to {:?}", keystore_path);
+        // Zombienet folder does not exist, assume we are not running under zombienet
+        Ok(())
     }
 }
+
+// https://stackoverflow.com/a/65192210
+fn copy_dir_all(
+    src: impl AsRef<Path>,
+    dst: impl AsRef<Path>,
+    files_copied: &mut u32,
+) -> std::io::Result<()> {
+    use std::fs;
+    fs::create_dir_all(&dst)?;
+    for entry in fs::read_dir(src)? {
+        let entry = entry?;
+        let ty = entry.file_type()?;
+        if ty.is_dir() {
+            copy_dir_all(
+                entry.path(),
+                dst.as_ref().join(entry.file_name()),
+                files_copied,
+            )?;
+        } else {
+            fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
+            *files_copied += 1;
+        }
+    }
+    Ok(())
+}
diff --git a/node/src/service.rs b/node/src/service.rs
index c8627a194..a5aeaf982 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -705,22 +705,18 @@ fn keystore_config(
 pub async fn start_solochain_node(
     // Parachain config not used directly, but we need it to derive the default values for some container_config args
     polkadot_config: Configuration,
-    container_chain_config: (ContainerChainCli, tokio::runtime::Handle),
+    container_chain_cli: ContainerChainCli,
     collator_options: CollatorOptions,
     hwbench: Option<sc_sysinfo::HwBench>,
 ) -> sc_service::error::Result<TaskManager> {
+    let tokio_handle = polkadot_config.tokio_handle.clone();
     let orchestrator_para_id = Default::default();
-    //orchestrator_config.announce_block = false; // prepare_node_config()
 
     let chain_type = polkadot_config.chain_spec.chain_type().clone();
     let relay_chain = polkadot_config.chain_spec.id().to_string();
 
-    // Channel to send messages to start/stop container chains
-    let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel();
-
     let (telemetry_worker_handle, mut task_manager, keystore_container) = {
-        let base_path = container_chain_config
-            .0
+        let base_path = container_chain_cli
             .base
             .base
             .shared_params
             .base_path
             .clone()
             .expect("base_path is always set");
         let config_dir = build_solochain_config_dir(&base_path);
         let _net_config_dir = build_solochain_net_config_dir(&config_dir);
-        let keystore =
-            keystore_config(container_chain_config.0.keystore_params(), &config_dir).unwrap();
+        let keystore = keystore_config(container_chain_cli.keystore_params(), &config_dir)
+            .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
 
         // Instead of putting keystore in
        // Collator1000-01/data/chains/simple_container_2000/keystore
         // We put it in
         // Collator1000-01/data/config/keystore
         // And same for "network" folder
         // But zombienet will put the keys in the old path, so we need to manually copy it if we
         // are running under zombienet
-        copy_zombienet_keystore(&keystore);
+        copy_zombienet_keystore(&keystore)?;
 
         let keystore_container = KeystoreContainer::new(&keystore)?;
 
         let task_manager = {
             //let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
             let registry = None;
-            TaskManager::new(container_chain_config.1.clone(), registry)?
+            TaskManager::new(tokio_handle.clone(), registry)?
         };
 
+        // TODO: impl telemetry
         /*
         let telemetry = parachain_config
             .telemetry_endpoints
@@ -779,7 +776,7 @@ pub async fn start_solochain_node(
 
     // Dummy parachain config only needed because `build_relay_chain_interface` needs to know if we
     // are collators or not
-    let validator = container_chain_config.0.base.collator;
+    let validator = container_chain_cli.base.collator;
     let mut dummy_parachain_config = dummy_config(
         polkadot_config.tokio_handle.clone(),
         polkadot_config.base_path.clone(),
@@ -821,6 +818,8 @@ pub async fn start_solochain_node(
         relay_chain_interface: relay_chain_interface.clone(),
     };
     let orchestrator_chain_interface = orchestrator_chain_interface_builder.build();
+    // Channel to send messages to start/stop container chains
+    let (cc_spawn_tx, cc_spawn_rx) = unbounded_channel();
 
     if validator {
         // Start task which detects para id assignment, and starts/stops container chains.
@@ -832,70 +831,67 @@ pub async fn start_solochain_node(
         );
     }
 
-    {
-        let (container_chain_cli, tokio_handle) = container_chain_config;
-        // If the orchestrator chain is running as a full-node, we start a full node for the
-        // container chain immediately, because only collator nodes detect their container chain
-        // assignment so otherwise it will never start.
-        if !validator {
-            if let Some(container_chain_para_id) = container_chain_cli.base.para_id {
-                // Spawn new container chain node
-                cc_spawn_tx
-                    .send(CcSpawnMsg::UpdateAssignment {
-                        current: Some(container_chain_para_id.into()),
-                        next: Some(container_chain_para_id.into()),
-                    })
-                    .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
-            }
-        }
+    // If the orchestrator chain is running as a full-node, we start a full node for the
+    // container chain immediately, because only collator nodes detect their container chain
+    // assignment so otherwise it will never start.
+    if !validator {
+        if let Some(container_chain_para_id) = container_chain_cli.base.para_id {
+            // Spawn new container chain node
+            cc_spawn_tx
+                .send(CcSpawnMsg::UpdateAssignment {
+                    current: Some(container_chain_para_id.into()),
+                    next: Some(container_chain_para_id.into()),
+                })
+                .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+        }
+    }
 
-        // Start container chain spawner task. This will start and stop container chains on demand.
-        let spawn_handle = task_manager.spawn_handle();
+    // Start container chain spawner task. This will start and stop container chains on demand.
+    let spawn_handle = task_manager.spawn_handle();
 
-        let container_chain_spawner = ContainerChainSpawner {
-            params: ContainerChainSpawnParams {
-                orchestrator_chain_interface,
-                container_chain_cli,
-                tokio_handle,
-                chain_type,
-                relay_chain,
-                relay_chain_interface,
-                sync_keystore,
-                orchestrator_para_id,
-                collation_params: if validator {
-                    Some(spawner::CollationParams {
-                        // TODO: all these args must be solochain instead of orchestrator
-                        orchestrator_client: None,
-                        orchestrator_tx_pool: None,
-                        orchestrator_para_id,
-                        collator_key: collator_key
-                            .expect("there should be a collator key if we're a validator"),
-                        solochain: true,
-                    })
-                } else {
-                    None
-                },
-                spawn_handle,
-                data_preserver: false,
-            },
-            state: Default::default(),
-            collate_on_tanssi,
-            collation_cancellation_constructs: None,
-        };
-        let state = container_chain_spawner.state.clone();
+    let container_chain_spawner = ContainerChainSpawner {
+        params: ContainerChainSpawnParams {
+            orchestrator_chain_interface,
+            container_chain_cli,
+            tokio_handle,
+            chain_type,
+            relay_chain,
+            relay_chain_interface,
+            sync_keystore,
+            orchestrator_para_id,
+            collation_params: if validator {
+                Some(spawner::CollationParams {
+                    // TODO: all these args must be solochain instead of orchestrator
+                    orchestrator_client: None,
+                    orchestrator_tx_pool: None,
+                    orchestrator_para_id,
+                    collator_key: collator_key
+                        .expect("there should be a collator key if we're a validator"),
+                    solochain: true,
+                })
+            } else {
+                None
+            },
+            spawn_handle,
+            data_preserver: false,
+        },
+        state: Default::default(),
+        collate_on_tanssi,
+        collation_cancellation_constructs: None,
+    };
+    let state = container_chain_spawner.state.clone();
 
-        task_manager.spawn_essential_handle().spawn(
-            "container-chain-spawner-rx-loop",
-            None,
-            container_chain_spawner.rx_loop(cc_spawn_rx, validator, true),
-        );
+    task_manager.spawn_essential_handle().spawn(
+        "container-chain-spawner-rx-loop",
+        None,
+        container_chain_spawner.rx_loop(cc_spawn_rx, validator, true),
+    );
 
-        task_manager.spawn_essential_handle().spawn(
-            "container-chain-spawner-debug-state",
-            None,
-            monitor::monitor_task(state),
-        )
-    }
+    task_manager.spawn_essential_handle().spawn(
+        "container-chain-spawner-debug-state",
+        None,
+        monitor::monitor_task(state),
+    );
 
     Ok(task_manager)
 }
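
Reviewer note (not part of the patch): a minimal, standalone sketch to sanity-check the
`copy_dir_all` helper that this patch moves to module scope and whose errors now propagate
via `std::io::Result`. Only `copy_dir_all` itself mirrors the patch; the temp-dir layout
and key file names below are made up for illustration. One caveat worth keeping: `std::fs::copy`
overwrites an existing destination file, so the removed "never overwrite files" TODO still
applies in the new location.

    use std::{fs, io, path::Path};

    // Recursively copy `src` into `dst`, counting copied files (same logic as the patch).
    fn copy_dir_all(
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        files_copied: &mut u32,
    ) -> io::Result<()> {
        fs::create_dir_all(&dst)?;
        for entry in fs::read_dir(src)? {
            let entry = entry?;
            if entry.file_type()?.is_dir() {
                // Recurse into subdirectories, mirroring the tree under `dst`
                copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()), files_copied)?;
            } else {
                // fs::copy truncates/overwrites an existing destination file
                fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?;
                *files_copied += 1;
            }
        }
        Ok(())
    }

    fn main() -> io::Result<()> {
        // Hypothetical layout mirroring the zombienet keystore copy:
        // <tmp>/keystore -> <tmp>/config/keystore
        let tmp = std::env::temp_dir().join("copy_dir_all_demo");
        let src = tmp.join("keystore");
        fs::create_dir_all(src.join("parachains"))?;
        fs::write(src.join("aura_key"), b"0xdeadbeef")?;
        fs::write(src.join("parachains").join("nimbus_key"), b"0xcafe")?;

        let mut files_copied = 0;
        copy_dir_all(&src, tmp.join("config").join("keystore"), &mut files_copied)?;
        assert_eq!(files_copied, 2);
        println!("Copied {} keys", files_copied);
        Ok(())
    }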