From f3a5e256da6f18fd019478c66419f6f07d98d64d Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 4 Oct 2024 10:27:30 +1000 Subject: [PATCH 1/8] Implement Subnet Sampling for PeerDAS (#6410) * Add `SAMPLES_PER_SLOT` config. * Rename `sampling` module to `peer_sampling` * Implement subnet sampling. * Update lookup test. * Merge branch 'unstable' into subnet-sampling * Merge branch 'unstable' into subnet-sampling # Conflicts: # beacon_node/beacon_chain/src/data_availability_checker.rs # beacon_node/http_api/src/publish_blocks.rs # beacon_node/lighthouse_network/src/types/globals.rs # beacon_node/network/src/sync/manager.rs * Merge branch 'unstable' into subnet-sampling --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 + .../src/data_availability_checker.rs | 22 ++++----- .../overflow_lru_cache.rs | 24 +++++----- .../beacon_chain/src/historical_blocks.rs | 4 +- beacon_node/http_api/src/publish_blocks.rs | 9 ++-- .../lighthouse_network/src/types/globals.rs | 48 ++++++++++++------- beacon_node/network/src/service.rs | 2 +- .../network/src/sync/block_lookups/tests.rs | 7 +-- beacon_node/network/src/sync/manager.rs | 2 +- beacon_node/network/src/sync/mod.rs | 2 +- .../network/src/sync/network_context.rs | 18 +++---- .../sync/{sampling.rs => peer_sampling.rs} | 0 .../network/src/sync/range_sync/chain.rs | 26 +++++----- .../chiado/config.yaml | 3 +- .../gnosis/config.yaml | 3 +- .../holesky/config.yaml | 3 +- .../mainnet/config.yaml | 3 +- .../sepolia/config.yaml | 3 +- consensus/types/src/chain_spec.rs | 18 ++++++- .../environment/tests/testnet_dir/config.yaml | 3 +- 20 files changed, 122 insertions(+), 80 deletions(-) rename beacon_node/network/src/sync/{sampling.rs => peer_sampling.rs} (100%) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2262325642d..13022b82699 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3851,6 +3851,8 @@ impl BeaconChain { } if let Some(data_columns) = data_columns { + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec. 
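For reference, the sampling width this patch threads through the node is `max(custody_subnet_count, SAMPLES_PER_SLOT)` subnets, converted to a column count. A minimal sketch of that arithmetic, assuming `data_columns_per_subnet()` is simply `NUMBER_OF_COLUMNS / DATA_COLUMN_SIDECAR_SUBNET_COUNT` (that helper is not shown in this diff), using the default values added to the config files below:

    // Illustrative sketch only; mirrors the computation in `DataAvailabilityChecker::new`.
    fn sampling_column_count(
        custody_subnet_count: usize, // CUSTODY_REQUIREMENT (all subnets for supernodes)
        samples_per_slot: usize,     // SAMPLES_PER_SLOT
        number_of_columns: usize,    // NUMBER_OF_COLUMNS
        subnet_count: usize,         // DATA_COLUMN_SIDECAR_SUBNET_COUNT
    ) -> usize {
        // Assumed definition of `data_columns_per_subnet()`.
        let data_columns_per_subnet = number_of_columns / subnet_count;
        let subnet_sampling_size = std::cmp::max(custody_subnet_count, samples_per_slot);
        subnet_sampling_size.saturating_mul(data_columns_per_subnet)
    }

    fn main() {
        // With the defaults added in this patch: max(4, 8) * (128 / 128) = 8 columns per slot,
        // matching the `samples_per_slot * data_columns_per_subnet()` expectation in the
        // updated `custody_lookup_happy_path` test.
        assert_eq!(sampling_column_count(4, 8, 128, 128), 8);
    }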
if !data_columns.is_empty() { debug!( self.log, "Writing data_columns to store"; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 4d5afdc8904..395f40c5dba 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -108,13 +108,15 @@ impl DataAvailabilityChecker { spec.custody_requirement as usize }; - let custody_column_count = - custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); + let subnet_sampling_size = + std::cmp::max(custody_subnet_count, spec.samples_per_slot as usize); + let sampling_column_count = + subnet_sampling_size.saturating_mul(spec.data_columns_per_subnet()); let inner = DataAvailabilityCheckerInner::new( OVERFLOW_LRU_CAPACITY, store, - custody_column_count, + sampling_column_count, spec.clone(), )?; Ok(Self { @@ -125,10 +127,8 @@ impl DataAvailabilityChecker { }) } - pub fn get_custody_columns_count(&self) -> usize { - self.availability_cache - .custody_subnet_count() - .saturating_mul(self.spec.data_columns_per_subnet()) + pub fn get_sampling_column_count(&self) -> usize { + self.availability_cache.sampling_column_count() } /// Checks if the block root is currenlty in the availability cache awaiting import because @@ -141,9 +141,9 @@ impl DataAvailabilityChecker { .get_execution_valid_block(block_root) } - /// Return the set of imported blob indexes for `block_root`. Returns None if there is no block + /// Return the set of cached blob indexes for `block_root`. Returns None if there is no block /// component for `block_root`. - pub fn imported_blob_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_blob_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| { @@ -156,9 +156,9 @@ impl DataAvailabilityChecker { }) } - /// Return the set of imported custody column indexes for `block_root`. Returns None if there is + /// Return the set of cached custody column indexes for `block_root`. Returns None if there is /// no block component for `block_root`. 
- pub fn imported_custody_column_indexes(&self, block_root: &Hash256) -> Option> { + pub fn cached_data_column_indexes(&self, block_root: &Hash256) -> Option> { self.availability_cache .peek_pending_components(block_root, |components| { components.map(|components| components.get_cached_data_columns_indices()) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 46ab08a8215..8f91bf34fcc 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -40,7 +40,7 @@ pub struct PendingComponents { pub enum BlockImportRequirement { AllBlobs, - CustodyColumns(usize), + ColumnSampling(usize), } impl PendingComponents { @@ -210,7 +210,7 @@ impl PendingComponents { .map_or(false, |num_expected_blobs| { num_expected_blobs == self.num_received_blobs() }), - BlockImportRequirement::CustodyColumns(num_expected_columns) => { + BlockImportRequirement::ColumnSampling(num_expected_columns) => { let num_received_data_columns = self.num_received_data_columns(); // No data columns when there are 0 blobs self.num_expected_blobs() @@ -281,7 +281,7 @@ impl PendingComponents { }; (Some(VariableList::new(verified_blobs)?), None) } - BlockImportRequirement::CustodyColumns(_) => { + BlockImportRequirement::ColumnSampling(_) => { let verified_data_columns = verified_data_columns .into_iter() .map(|d| d.into_inner()) @@ -353,8 +353,8 @@ pub struct DataAvailabilityCheckerInner { /// This cache holds a limited number of states in memory and reconstructs them /// from disk when necessary. This is necessary until we merge tree-states state_cache: StateLRUCache, - /// The number of data columns the node is custodying. - custody_column_count: usize, + /// The number of data columns the node is sampling via subnet sampling. 
+ sampling_column_count: usize, spec: Arc, } @@ -362,19 +362,19 @@ impl DataAvailabilityCheckerInner { pub fn new( capacity: NonZeroUsize, beacon_store: BeaconStore, - custody_column_count: usize, + sampling_column_count: usize, spec: Arc, ) -> Result { Ok(Self { critical: RwLock::new(LruCache::new(capacity)), state_cache: StateLRUCache::new(beacon_store, spec.clone()), - custody_column_count, + sampling_column_count, spec, }) } - pub fn custody_subnet_count(&self) -> usize { - self.custody_column_count + pub fn sampling_column_count(&self) -> usize { + self.sampling_column_count } /// Returns true if the block root is known, without altering the LRU ordering @@ -440,8 +440,8 @@ impl DataAvailabilityCheckerInner { ) -> Result { let peer_das_enabled = self.spec.is_peer_das_enabled_for_epoch(epoch); if peer_das_enabled { - Ok(BlockImportRequirement::CustodyColumns( - self.custody_column_count, + Ok(BlockImportRequirement::ColumnSampling( + self.sampling_column_count, )) } else { Ok(BlockImportRequirement::AllBlobs) @@ -456,7 +456,7 @@ impl DataAvailabilityCheckerInner { block_import_requirement: &BlockImportRequirement, pending_components: &PendingComponents, ) -> bool { - let BlockImportRequirement::CustodyColumns(num_expected_columns) = block_import_requirement + let BlockImportRequirement::ColumnSampling(num_expected_columns) = block_import_requirement else { return false; }; diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 1372211b175..a23b6ddc1e5 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -94,7 +94,9 @@ impl BeaconChain { // Blobs are stored per block, and data columns are each stored individually let n_blob_ops_per_block = if self.spec.is_peer_das_scheduled() { - self.data_availability_checker.get_custody_columns_count() + // TODO(das): `available_block includes all sampled columns, but we only need to store + // custody columns. To be clarified in spec PR. + self.data_availability_checker.get_sampling_column_count() } else { 1 }; diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 16364b435a9..fceeb2dd231 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -389,18 +389,17 @@ pub async fn publish_block>( .count() > 0 { - let custody_columns_indices = &network_globals.custody_columns; - - let custody_columns = gossip_verified_data_columns + let sampling_columns_indices = &network_globals.sampling_columns; + let sampling_columns = gossip_verified_data_columns .into_iter() .flatten() - .filter(|data_column| custody_columns_indices.contains(&data_column.index())) + .filter(|data_column| sampling_columns_indices.contains(&data_column.index())) .collect(); // Importing the columns could trigger block import and network publication in the case // where the block was already seen on gossip. 
if let Err(e) = - Box::pin(chain.process_gossip_data_columns(custody_columns, publish_fn)).await + Box::pin(chain.process_gossip_data_columns(sampling_columns, publish_fn)).await { let msg = format!("Invalid data column: {e}"); return if let BroadcastValidation::Gossip = validation_level { diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index f271c9ff722..bcebd02a0ed 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -26,9 +26,9 @@ pub struct NetworkGlobals { pub sync_state: RwLock, /// The current state of the backfill sync. pub backfill_state: RwLock, - /// The computed custody subnets and columns is stored to avoid re-computing. - pub custody_subnets: Vec, - pub custody_columns: Vec, + /// The computed sampling subnets and columns is stored to avoid re-computing. + pub sampling_subnets: Vec, + pub sampling_columns: Vec, /// Network-related configuration. Immutable after initialization. pub config: Arc, /// Ethereum chain configuration. Immutable after initialization. @@ -45,24 +45,31 @@ impl NetworkGlobals { config: Arc, spec: Arc, ) -> Self { - let (custody_subnets, custody_columns) = if spec.is_peer_das_scheduled() { + let (sampling_subnets, sampling_columns) = if spec.is_peer_das_scheduled() { + let node_id = enr.node_id().raw(); + let custody_subnet_count = local_metadata .custody_subnet_count() .copied() .expect("custody subnet count must be set if PeerDAS is scheduled"); - let custody_subnets = DataColumnSubnetId::compute_custody_subnets::( - enr.node_id().raw(), - custody_subnet_count, + + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); + + let sampling_subnets = DataColumnSubnetId::compute_custody_subnets::( + node_id, + subnet_sampling_size, &spec, ) - .expect("custody subnet count must be valid") + .expect("sampling subnet count must be valid") .collect::>(); - let custody_columns = custody_subnets + + let sampling_columns = sampling_subnets .iter() .flat_map(|subnet| subnet.columns::(&spec)) .sorted() .collect(); - (custody_subnets, custody_columns) + + (sampling_subnets, sampling_columns) } else { (vec![], vec![]) }; @@ -76,8 +83,8 @@ impl NetworkGlobals { gossipsub_subscriptions: RwLock::new(HashSet::new()), sync_state: RwLock::new(SyncState::Stalled), backfill_state: RwLock::new(BackFillState::NotRequired), - custody_subnets, - custody_columns, + sampling_subnets, + sampling_columns, config, spec, } @@ -197,12 +204,13 @@ mod test { use types::{Epoch, EthSpec, MainnetEthSpec as E}; #[test] - fn test_custody_subnets() { + fn test_sampling_subnets() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -213,17 +221,20 @@ mod test { config, Arc::new(spec), ); - assert_eq!(globals.custody_subnets.len(), custody_subnet_count as usize); + assert_eq!( + globals.sampling_subnets.len(), + subnet_sampling_size as usize + ); } #[test] - fn test_custody_columns() { + fn test_sampling_columns() { let log = logging::test_logger(); let mut spec = E::default_spec(); spec.eip7594_fork_epoch = Some(Epoch::new(0)); let custody_subnet_count = spec.data_column_sidecar_subnet_count / 2; - let 
custody_columns_count = spec.number_of_columns / 2; + let subnet_sampling_size = std::cmp::max(custody_subnet_count, spec.samples_per_slot); let metadata = get_metadata(custody_subnet_count); let config = Arc::new(NetworkConfig::default()); @@ -234,7 +245,10 @@ mod test { config, Arc::new(spec), ); - assert_eq!(globals.custody_columns.len(), custody_columns_count); + assert_eq!( + globals.sampling_columns.len(), + subnet_sampling_size as usize + ); } fn get_metadata(custody_subnet_count: u64) -> MetaData { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f36d11ecdd1..5a66cb7f30d 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -813,7 +813,7 @@ impl NetworkService { } } } else { - for column_subnet in &self.network_globals.custody_subnets { + for column_subnet in &self.network_globals.sampling_subnets { for fork_digest in self.required_gossip_fork_digests() { let gossip_kind = Subnet::DataColumn(*column_subnet).into(); let topic = diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 4c73e8f8d03..151333a2ef5 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,6 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::sync::manager::{BlockProcessType, SyncManager}; -use crate::sync::sampling::SamplingConfig; +use crate::sync::peer_sampling::SamplingConfig; use crate::sync::{SamplingId, SyncMessage}; use crate::NetworkMessage; use std::sync::Arc; @@ -2037,9 +2037,10 @@ fn custody_lookup_happy_path() { // Should not request blobs let id = r.expect_block_lookup_request(block.canonical_root()); r.complete_valid_block_request(id, block.into(), true); - let custody_column_count = spec.custody_requirement * spec.data_columns_per_subnet() as u64; + // for each slot we download `samples_per_slot` columns + let sample_column_count = spec.samples_per_slot * spec.data_columns_per_subnet() as u64; let custody_ids = - r.expect_only_data_columns_by_root_requests(block_root, custody_column_count as usize); + r.expect_only_data_columns_by_root_requests(block_root, sample_column_count as usize); r.complete_valid_custody_request(custody_ids, data_columns, false); r.expect_no_active_lookups(); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f1417804849..c1f4fe54fb7 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -38,9 +38,9 @@ use super::block_lookups::BlockLookups; use super::network_context::{ BlockOrBlob, CustodyByRootResult, RangeRequestId, RpcEvent, SyncNetworkContext, }; +use super::peer_sampling::{Sampling, SamplingConfig, SamplingResult}; use super::peer_sync_info::{remote_sync_type, PeerSyncType}; use super::range_sync::{RangeSync, RangeSyncType, EPOCHS_PER_BATCH}; -use super::sampling::{Sampling, SamplingConfig, SamplingResult}; use crate::network_beacon_processor::{ChainSegmentProcessId, NetworkBeaconProcessor}; use crate::service::NetworkMessage; use crate::status::ToStatusMessage; diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index 6669add4453..1dca6f02ac2 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -6,9 +6,9 @@ mod block_lookups; mod block_sidecar_coupling; pub mod manager; mod network_context; +mod peer_sampling; mod peer_sync_info; mod range_sync; 
-mod sampling; pub use lighthouse_network::service::api_types::SamplingId; pub use manager::{BatchProcessResult, SyncMessage}; diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index dc35a141d21..492b703f8a2 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -418,13 +418,13 @@ impl SyncNetworkContext { false }; - let (expects_custody_columns, num_of_custody_column_req) = + let (expects_columns, num_of_column_req) = if matches!(batch_type, ByRangeRequestType::BlocksAndColumns) { - let custody_indexes = self.network_globals().custody_columns.clone(); + let column_indexes = self.network_globals().sampling_columns.clone(); let mut num_of_custody_column_req = 0; for (peer_id, columns_by_range_request) in - self.make_columns_by_range_requests(request, &custody_indexes)? + self.make_columns_by_range_requests(request, &column_indexes)? { requested_peers.push(peer_id); @@ -448,15 +448,15 @@ impl SyncNetworkContext { num_of_custody_column_req += 1; } - (Some(custody_indexes), Some(num_of_custody_column_req)) + (Some(column_indexes), Some(num_of_custody_column_req)) } else { (None, None) }; let info = RangeBlockComponentsRequest::new( expected_blobs, - expects_custody_columns, - num_of_custody_column_req, + expects_columns, + num_of_column_req, requested_peers, ); self.range_block_components_requests @@ -668,7 +668,7 @@ impl SyncNetworkContext { let imported_blob_indexes = self .chain .data_availability_checker - .imported_blob_indexes(&block_root) + .cached_blob_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let indices = (0..expected_blobs as u64) @@ -786,13 +786,13 @@ impl SyncNetworkContext { let custody_indexes_imported = self .chain .data_availability_checker - .imported_custody_column_indexes(&block_root) + .cached_data_column_indexes(&block_root) .unwrap_or_default(); // Include only the blob indexes not yet imported (received through gossip) let custody_indexes_to_fetch = self .network_globals() - .custody_columns + .sampling_columns .clone() .into_iter() .filter(|index| !custody_indexes_imported.contains(index)) diff --git a/beacon_node/network/src/sync/sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs similarity index 100% rename from beacon_node/network/src/sync/sampling.rs rename to beacon_node/network/src/sync/peer_sampling.rs diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index ed5946ada72..732e4a7bd16 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -444,9 +444,9 @@ impl SyncingChain { self.request_batches(network)?; } } - } else if !self.good_peers_on_custody_subnets(self.processing_target, network) { + } else if !self.good_peers_on_sampling_subnets(self.processing_target, network) { // This is to handle the case where no batch was sent for the current processing - // target when there is no custody peers available. This is a valid state and should not + // target when there is no sampling peers available. This is a valid state and should not // return an error. return Ok(KeepChain); } else { @@ -1075,10 +1075,10 @@ impl SyncingChain { // check if we have the batch for our optimistic start. If not, request it first. // We wait for this batch before requesting any other batches. 
if let Some(epoch) = self.optimistic_start { - if !self.good_peers_on_custody_subnets(epoch, network) { + if !self.good_peers_on_sampling_subnets(epoch, network) { debug!( self.log, - "Waiting for peers to be available on custody column subnets" + "Waiting for peers to be available on sampling column subnets" ); return Ok(KeepChain); } @@ -1107,14 +1107,18 @@ impl SyncingChain { Ok(KeepChain) } - /// Checks all custody column subnets for peers. Returns `true` if there is at least one peer in - /// every custody column subnet. - fn good_peers_on_custody_subnets(&self, epoch: Epoch, network: &SyncNetworkContext) -> bool { + /// Checks all sampling column subnets for peers. Returns `true` if there is at least one peer in + /// every sampling column subnet. + fn good_peers_on_sampling_subnets( + &self, + epoch: Epoch, + network: &SyncNetworkContext, + ) -> bool { if network.chain.spec.is_peer_das_enabled_for_epoch(epoch) { - // Require peers on all custody column subnets before sending batches + // Require peers on all sampling column subnets before sending batches let peers_on_all_custody_subnets = network .network_globals() - .custody_subnets + .sampling_subnets .iter() .all(|subnet_id| { let peer_count = network @@ -1167,11 +1171,11 @@ impl SyncingChain { return None; } - // don't send batch requests until we have peers on custody subnets + // don't send batch requests until we have peers on sampling subnets // TODO(das): this is a workaround to avoid sending out excessive block requests because // block and data column requests are currently coupled. This can be removed once we find a // way to decouple the requests and do retries individually, see issue #6258. - if !self.good_peers_on_custody_subnets(self.to_be_downloaded, network) { + if !self.good_peers_on_sampling_subnets(self.to_be_downloaded, network) { debug!( self.log, "Waiting for peers to be available on custody column subnets" diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 74fca4c5010..1eca01bbeef 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -140,4 +140,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 07bd21b35c2..500555a2694 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 67f1e5b6831..d67d77d3bea 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -127,4 +127,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 
DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index acf4d83f323..18591fecdcd 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -149,4 +149,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 8b84d870103..b08a6180bf0 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -123,4 +123,5 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index e31427121ec..7e933eea197 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -198,6 +198,7 @@ pub struct ChainSpec { pub custody_requirement: u64, pub data_column_sidecar_subnet_count: u64, pub number_of_columns: usize, + pub samples_per_slot: u64, /* * Networking @@ -811,6 +812,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific @@ -1132,6 +1134,7 @@ impl ChainSpec { custody_requirement: 4, data_column_sidecar_subnet_count: 128, number_of_columns: 128, + samples_per_slot: 8, /* * Network specific */ @@ -1382,6 +1385,9 @@ pub struct Config { #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, + #[serde(default = "default_samples_per_slot")] + #[serde(with = "serde_utils::quoted_u64")] + samples_per_slot: u64, } fn default_bellatrix_fork_version() -> [u8; 4] { @@ -1521,17 +1527,21 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { } const fn default_custody_requirement() -> u64 { - 1 + 4 } const fn default_data_column_sidecar_subnet_count() -> u64 { - 32 + 128 } const fn default_number_of_columns() -> u64 { 128 } +const fn default_samples_per_slot() -> u64 { + 8 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( @@ -1727,6 +1737,7 @@ impl Config { custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, number_of_columns: spec.number_of_columns as u64, + samples_per_slot: spec.samples_per_slot, } } @@ -1802,6 +1813,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns, + samples_per_slot, } = self; if preset_base != E::spec_name().to_string().as_str() { @@ -1881,6 +1893,7 @@ impl Config { custody_requirement, data_column_sidecar_subnet_count, number_of_columns: number_of_columns as usize, + samples_per_slot, ..chain_spec.clone() }) @@ -2125,6 +2138,7 
@@ mod yaml_tests { CUSTODY_REQUIREMENT: 1 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 + SAMPLES_PER_SLOT: 8 "#; let chain_spec: Config = serde_yaml::from_str(spec).unwrap(); diff --git a/lighthouse/environment/tests/testnet_dir/config.yaml b/lighthouse/environment/tests/testnet_dir/config.yaml index 84e8274f06e..34e42a61f67 100644 --- a/lighthouse/environment/tests/testnet_dir/config.yaml +++ b/lighthouse/environment/tests/testnet_dir/config.yaml @@ -102,4 +102,5 @@ ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # DAS CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 -NUMBER_OF_COLUMNS: 128 \ No newline at end of file +NUMBER_OF_COLUMNS: 128 +SAMPLES_PER_SLOT: 8 \ No newline at end of file From 8cf686f5c11cbad19727885d07df5abfddeddb0f Mon Sep 17 00:00:00 2001 From: Akihito Nakano Date: Fri, 4 Oct 2024 12:00:32 +0900 Subject: [PATCH 2/8] Add test for ActiveSamplingRequest (#6307) * Add test for ActiveSamplingRequest * Fix the column_indexes field from the requested ones to the responded ones * Fix clippy errors * Move tests to tests.rs * Fix unused import * Fix clippy error * Merge branch 'unstable' into fork/add-test-for-active-sampling-request # Conflicts: # beacon_node/network/Cargo.toml # beacon_node/network/src/sync/sampling.rs * Merge branch 'unstable' into fork/add-test-for-active-sampling-request --- Cargo.lock | 1 + beacon_node/network/Cargo.toml | 1 + .../network/src/sync/block_lookups/tests.rs | 92 +++++++++++++++++++ beacon_node/network/src/sync/manager.rs | 14 +++ beacon_node/network/src/sync/peer_sampling.rs | 38 +++++++- 5 files changed, 145 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 44ca67e9b47..3a063e7e0e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5640,6 +5640,7 @@ dependencies = [ "async-channel", "beacon_chain", "beacon_processor", + "bls", "delay_map", "derivative", "error-chain", diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 4df17617326..6d61bffe3de 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,6 +15,7 @@ eth2 = { workspace = true } gossipsub = { workspace = true } eth2_network_config = { workspace = true } kzg = { workspace = true } +bls = { workspace = true } [dependencies] alloy-primitives = { workspace = true } diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index 151333a2ef5..cd4609e1473 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -310,6 +310,13 @@ impl TestRig { ); } + fn expect_active_sampling(&mut self, block_root: &Hash256) { + assert!(self + .sync_manager + .active_sampling_requests() + .contains(block_root)); + } + fn expect_clean_finished_sampling(&mut self) { self.expect_empty_network(); self.expect_sampling_result_work(); @@ -1090,6 +1097,11 @@ impl TestRig { .unwrap_or_else(|e| panic!("Expected sampling result work: {e}")) } + fn expect_no_work_event(&mut self) { + self.drain_processor_rx(); + assert!(self.network_rx_queue.is_empty()); + } + fn expect_no_penalty_for(&mut self, peer_id: PeerId) { self.drain_network_rx(); let downscore_events = self @@ -1290,6 +1302,16 @@ impl TestRig { imported: false, }); } + + fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + self.sync_manager + .assert_sampling_request_status(block_root, ongoing, no_peers) + } } #[test] @@ -2023,6 +2045,76 @@ fn 
sampling_avoid_retrying_same_peer() { r.expect_empty_network(); } +#[test] +fn sampling_batch_requests() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + + // Resolve the request. + r.complete_valid_sampling_column_requests( + vec![(sync_request_id, column_indexes.clone())], + data_columns, + ); + r.expect_clean_finished_sampling(); +} + +#[test] +fn sampling_batch_requests_not_enough_responses_returned() { + let Some(mut r) = TestRig::test_setup_after_peerdas() else { + return; + }; + let _supernode = r.new_connected_supernode_peer(); + let (block, data_columns) = r.rand_block_and_data_columns(); + let block_root = block.canonical_root(); + r.trigger_sample_block(block_root, block.slot()); + + // Retrieve the sample request, which should be batched. + let (sync_request_id, column_indexes) = r + .expect_only_data_columns_by_root_requests(block_root, 1) + .pop() + .unwrap(); + assert_eq!(column_indexes.len(), SAMPLING_REQUIRED_SUCCESSES); + + // The request status should be set to Sampling. + r.assert_sampling_request_status(block_root, &column_indexes, &vec![]); + + // Split the indexes to simulate the case where the supernode doesn't have the requested column. + let (_column_indexes_supernode_does_not_have, column_indexes_to_complete) = + column_indexes.split_at(1); + + // Complete the requests but only partially, so a NotEnoughResponsesReturned error occurs. + let data_columns_to_complete = data_columns + .iter() + .filter(|d| column_indexes_to_complete.contains(&d.index)) + .cloned() + .collect::>(); + r.complete_data_columns_by_root_request( + (sync_request_id, column_indexes.clone()), + &data_columns_to_complete, + ); + + // The request status should be set to NoPeers since the supernode, the only peer, returned not enough responses. + r.assert_sampling_request_status(block_root, &vec![], &column_indexes); + + // The sampling request stalls. + r.expect_empty_network(); + r.expect_no_work_event(); + r.expect_active_sampling(&block_root); +} + #[test] fn custody_lookup_happy_path() { let Some(mut r) = TestRig::test_setup_after_peerdas() else { diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index c1f4fe54fb7..708c4308b80 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -71,6 +71,9 @@ use std::time::Duration; use tokio::sync::mpsc; use types::{BlobSidecar, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, Slot}; +#[cfg(test)] +use types::ColumnIndex; + /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a /// fully sync'd peer. 
@@ -334,6 +337,17 @@ impl SyncManager { self.sampling.active_sampling_requests() } + #[cfg(test)] + pub(crate) fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + self.sampling + .assert_sampling_request_status(block_root, ongoing, no_peers); + } + fn network_globals(&self) -> &NetworkGlobals { self.network.network_globals() } diff --git a/beacon_node/network/src/sync/peer_sampling.rs b/beacon_node/network/src/sync/peer_sampling.rs index 4d0fa509cd5..086fb0ec8d3 100644 --- a/beacon_node/network/src/sync/peer_sampling.rs +++ b/beacon_node/network/src/sync/peer_sampling.rs @@ -42,6 +42,18 @@ impl Sampling { self.requests.values().map(|r| r.block_root).collect() } + #[cfg(test)] + pub fn assert_sampling_request_status( + &self, + block_root: Hash256, + ongoing: &Vec, + no_peers: &Vec, + ) { + let requester = SamplingRequester::ImportedBlock(block_root); + let active_sampling_request = self.requests.get(&requester).unwrap(); + active_sampling_request.assert_sampling_request_status(ongoing, no_peers); + } + /// Create a new sampling request for a known block /// /// ### Returns @@ -220,6 +232,21 @@ impl ActiveSamplingRequest { } } + #[cfg(test)] + pub fn assert_sampling_request_status( + &self, + ongoing: &Vec, + no_peers: &Vec, + ) { + for idx in ongoing { + assert!(self.column_requests.get(idx).unwrap().is_ongoing()); + } + + for idx in no_peers { + assert!(self.column_requests.get(idx).unwrap().is_no_peers()); + } + } + /// Insert a downloaded column into an active sampling request. Then make progress on the /// entire request. /// @@ -253,10 +280,14 @@ impl ActiveSamplingRequest { match resp { Ok((mut resp_data_columns, seen_timestamp)) => { + let resp_column_indexes = resp_data_columns + .iter() + .map(|r| r.index) + .collect::>(); debug!(self.log, "Sample download success"; "block_root" => %self.block_root, - "column_indexes" => ?column_indexes, + "column_indexes" => ?resp_column_indexes, "count" => resp_data_columns.len() ); metrics::inc_counter_vec(&metrics::SAMPLE_DOWNLOAD_RESULT, &[metrics::SUCCESS]); @@ -598,6 +629,11 @@ mod request { } } + #[cfg(test)] + pub(crate) fn is_no_peers(&self) -> bool { + matches!(self.status, Status::NoPeers) + } + pub(crate) fn choose_peer( &mut self, cx: &SyncNetworkContext, From 1bd8f31545e8a22833781123852811beeffab37e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 7 Oct 2024 20:41:52 +1100 Subject: [PATCH 3/8] Clean up temporary state flags while running (#6422) * Clean up temporary state flags while running * Add regression test * Simplify --- beacon_node/beacon_chain/tests/store_tests.rs | 57 ++++++++++++++++++- beacon_node/store/src/garbage_collection.rs | 3 +- beacon_node/store/src/hot_cold_store.rs | 9 +++ 3 files changed, 66 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 5d83d65efd2..1a6b444319c 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2514,7 +2514,7 @@ async fn pruning_test( } #[tokio::test] -async fn garbage_collect_temp_states_from_failed_block() { +async fn garbage_collect_temp_states_from_failed_block_on_startup() { let db_path = tempdir().unwrap(); // Wrap these functions to ensure the variables are dropped before we try to open another @@ -2571,6 +2571,61 @@ async fn garbage_collect_temp_states_from_failed_block() { assert_eq!(store.iter_temporary_state_roots().count(), 0); } +#[tokio::test] +async fn 
garbage_collect_temp_states_from_failed_block_on_finalization() { + let db_path = tempdir().unwrap(); + + let store = get_store(&db_path); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + + let slots_per_epoch = E::slots_per_epoch(); + + let genesis_state = harness.get_current_state(); + let block_slot = Slot::new(2 * slots_per_epoch); + let ((signed_block, _), state) = harness.make_block(genesis_state, block_slot).await; + + let (mut block, _) = (*signed_block).clone().deconstruct(); + + // Mutate the block to make it invalid, and re-sign it. + *block.state_root_mut() = Hash256::repeat_byte(0xff); + let proposer_index = block.proposer_index() as usize; + let block = Arc::new(block.sign( + &harness.validator_keypairs[proposer_index].sk, + &state.fork(), + state.genesis_validators_root(), + &harness.spec, + )); + + // The block should be rejected, but should store a bunch of temporary states. + harness.set_current_slot(block_slot); + harness + .process_block_result((block, None)) + .await + .unwrap_err(); + + assert_eq!( + store.iter_temporary_state_roots().count(), + block_slot.as_usize() - 1 + ); + + // Finalize the chain without the block, which should result in pruning of all temporary states. + let blocks_required_to_finalize = 3 * slots_per_epoch; + harness.advance_slot(); + harness + .extend_chain( + blocks_required_to_finalize as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Check that the finalization migration ran. + assert_ne!(store.get_split_slot(), 0); + + // Check that temporary states have been pruned. + assert_eq!(store.iter_temporary_state_roots().count(), 0); +} + #[tokio::test] async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index c70ef898692..5f8ed8f5e73 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -21,7 +21,6 @@ where .try_fold(vec![], |mut ops, state_root| { let state_root = state_root?; ops.push(StoreOp::DeleteState(state_root, None)); - ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); Result::<_, Error>::Ok(ops) })?; @@ -29,7 +28,7 @@ where debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() / 2 + delete_ops.len() ); self.do_atomically_with_block_and_blobs_cache(delete_ops)?; } diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index ba288039d6b..991f215210c 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1160,10 +1160,19 @@ impl, Cold: ItemStore> HotColdDB } StoreOp::DeleteState(state_root, slot) => { + // Delete the hot state summary. let state_summary_key = get_key_for_col(DBColumn::BeaconStateSummary.into(), state_root.as_slice()); key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key)); + // Delete the state temporary flag (if any). Temporary flags are commonly + // created by the state advance routine. 
+ let state_temp_key = get_key_for_col( + DBColumn::BeaconStateTemporary.into(), + state_root.as_slice(), + ); + key_value_batch.push(KeyValueStoreOp::DeleteKey(state_temp_key)); + if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { let state_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_slice()); From 48dd3f385cd3ecde1baeb9135815f376d79b048a Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 7 Oct 2024 09:35:38 -0700 Subject: [PATCH 4/8] Filter out BlsToExecutionChange messages for 0x02 validators (#6464) * Filter out 0x02 validators from `get_bls_to_execution_changes` * Prune bls to execution changes that have a 0x02 credential * lint --- beacon_node/operation_pool/src/bls_to_execution_changes.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index 07fd72f02c5..cbab97e7199 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -113,7 +113,7 @@ impl BlsToExecutionChanges { .validators() .get(validator_index as usize) .map_or(true, |validator| { - let prune = validator.has_eth1_withdrawal_credential(spec) + let prune = validator.has_execution_withdrawal_credential(spec) && head_block .message() .body() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index e6a61edc098..0b032b0c8a7 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -585,7 +585,7 @@ impl OperationPool { && state .get_validator(address_change.as_inner().message.validator_index as usize) .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) + !validator.has_execution_withdrawal_credential(spec) }) }, |address_change| address_change.as_inner().clone(), From 71c5388461df8e48bafb2e1ecd732eb29957ce03 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 9 Oct 2024 00:18:41 +0300 Subject: [PATCH 5/8] Transition block lookup sync to range sync (#6122) * Transition block lookup sync to range sync * Log unexpected state * Merge remote-tracking branch 'sigp/unstable' into lookup-to-range * Add docs * Merge remote-tracking branch 'sigp/unstable' into lookup-to-range --- beacon_node/beacon_chain/src/test_utils.rs | 4 +- .../src/network_beacon_processor/mod.rs | 5 +- .../network/src/sync/block_lookups/mod.rs | 67 ++++++++++--- .../sync/block_lookups/single_block_lookup.rs | 10 +- .../network/src/sync/block_lookups/tests.rs | 48 +++++++++- beacon_node/network/src/sync/manager.rs | 93 +++++++++++++++++-- .../network/src/sync/network_context.rs | 6 ++ .../network/src/sync/range_sync/range.rs | 3 + 8 files changed, 206 insertions(+), 30 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 344820c6a24..9be3b4cc2f9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2790,12 +2790,12 @@ pub fn build_log(level: slog::Level, logger_type: LoggerType) -> Logger { match logger_type { LoggerType::Test => { let drain = FullFormat::new(TermDecorator::new().build()).build().fuse(); - let drain = Async::new(drain).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); Logger::root(drain.filter_level(level).fuse(), o!()) } LoggerType::CI => { 
let drain = FullFormat::new(ci_decorator()).build().fuse(); - let drain = Async::new(drain).build().fuse(); + let drain = Async::new(drain).chan_size(10_000).build().fuse(); Logger::root(drain.filter_level(level).fuse(), o!()) } LoggerType::Null => { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 5ec6140828b..04571e181d7 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use std::time::Duration; use store::MemoryStore; use task_executor::TaskExecutor; +use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::{self, error::TrySendError}; use types::*; @@ -831,7 +832,7 @@ impl NetworkBeaconProcessor { /// Send a message to `sync_tx`. /// /// Creates a log if there is an internal error. - fn send_sync_message(&self, message: SyncMessage) { + pub(crate) fn send_sync_message(&self, message: SyncMessage) { self.sync_tx.send(message).unwrap_or_else(|e| { debug!(self.log, "Could not send message to the sync service"; "error" => %e) @@ -859,6 +860,7 @@ impl NetworkBeaconProcessor> { // processor (but not much else). pub fn null_for_testing( network_globals: Arc>, + sync_tx: UnboundedSender>, chain: Arc>>, executor: TaskExecutor, log: Logger, @@ -871,7 +873,6 @@ impl NetworkBeaconProcessor> { } = <_>::default(); let (network_tx, _network_rx) = mpsc::unbounded_channel(); - let (sync_tx, _sync_rx) = mpsc::unbounded_channel(); let network_beacon_processor = Self { beacon_processor_send: beacon_processor_tx, diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index a9dbf11fd06..a89f533ecc6 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -28,6 +28,7 @@ use super::network_context::{PeerGroup, RpcResponseError, SyncNetworkContext}; use crate::metrics; use crate::sync::block_lookups::common::ResponseType; use crate::sync::block_lookups::parent_chain::find_oldest_fork_ancestor; +use crate::sync::SyncMessage; use beacon_chain::block_verification_types::AsBlock; use beacon_chain::data_availability_checker::{ AvailabilityCheckError, AvailabilityCheckErrorCategory, @@ -55,7 +56,10 @@ mod tests; /// The maximum depth we will search for a parent block. In principle we should have sync'd any /// canonical chain to its head once the peer connects. A chain should not appear where it's depth /// is further back than the most recent head slot. -pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// +/// Have the same value as range's sync tolerance to consider a peer synced. Once sync lookup +/// reaches the maximum depth it will force trigger range sync. +pub(crate) const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE; const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60; pub const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 4; @@ -254,22 +258,59 @@ impl BlockLookups { // blocks on top of A forming A -> C. The malicious peer forces us to fetch C // from it, which will result in parent A hitting the chain_too_long error. Then // the valid chain A -> B is dropped too. 
- if let Ok(block_to_drop) = find_oldest_fork_ancestor(parent_chains, chain_idx) { - // Drop all lookups descending from the child of the too long parent chain - if let Some((lookup_id, lookup)) = self + // + // `find_oldest_fork_ancestor` should never return Err, unwrapping to tip for + // complete-ness + let parent_chain_tip = parent_chain.tip; + let block_to_drop = + find_oldest_fork_ancestor(parent_chains, chain_idx).unwrap_or(parent_chain_tip); + // Drop all lookups descending from the child of the too long parent chain + if let Some((lookup_id, lookup)) = self + .single_block_lookups + .iter() + .find(|(_, l)| l.block_root() == block_to_drop) + { + // If a lookup chain is too long, we can't distinguish a valid chain from a + // malicious one. We must attempt to sync this chain to not lose liveness. If + // the chain grows too long, we stop lookup sync and transition this head to + // forward range sync. We need to tell range sync which head to sync to, and + // from which peers. The lookup of the very tip of this chain may contain zero + // peers if it's the parent-child lookup. So we do a bit of a trick here: + // - Tell range sync to sync to the tip's root (if available, else its ancestor) + // - But use all peers in the ancestor lookup, which should have at least one + // peer, and its peer set is a strict superset of the tip's lookup. + if let Some((_, tip_lookup)) = self .single_block_lookups .iter() - .find(|(_, l)| l.block_root() == block_to_drop) + .find(|(_, l)| l.block_root() == parent_chain_tip) { - for &peer_id in lookup.all_peers() { - cx.report_peer( - peer_id, - PeerAction::LowToleranceError, - "chain_too_long", - ); - } - self.drop_lookup_and_children(*lookup_id); + cx.send_sync_message(SyncMessage::AddPeersForceRangeSync { + peers: lookup.all_peers().copied().collect(), + head_slot: tip_lookup.peek_downloaded_block_slot(), + head_root: parent_chain_tip, + }); + } else { + // Should never happen, log error and continue the lookup drop + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Parent chain tip lookup not found", + "block_root" => ?parent_chain_tip + ); } + + // Do not downscore peers here. Because we can't distinguish a valid chain from + // a malicious one we may penalize honest peers for attempting to discover us a + // valid chain. Until blocks_by_range allows to specify a tip, for example with + // https://github.com/ethereum/consensus-specs/pull/3845 we will have poor + // attributability. A peer can send us garbage blocks over blocks_by_root, and + // then correct blocks via blocks_by_range. 
+ + self.drop_lookup_and_children(*lookup_id); + } else { + // Should never happen + error!(self.log, "Unable to transition lookup to range sync"; + "error" => "Block to drop lookup not found", + "block_root" => ?block_to_drop + ); } return false; diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 73ffcd43845..4e7268a72ac 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -15,7 +15,7 @@ use std::time::{Duration, Instant}; use store::Hash256; use strum::IntoStaticStr; use types::blob_sidecar::FixedBlobSidecarList; -use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock}; +use types::{DataColumnSidecarList, EthSpec, SignedBeaconBlock, Slot}; // Dedicated enum for LookupResult to force its usage #[must_use = "LookupResult must be handled with on_lookup_result"] @@ -91,6 +91,14 @@ impl SingleBlockLookup { } } + /// Return the slot of this lookup's block if it's currently cached as `AwaitingProcessing` + pub fn peek_downloaded_block_slot(&self) -> Option { + self.block_request_state + .state + .peek_downloaded_data() + .map(|block| block.slot()) + } + /// Get the block root that is being requested. pub fn block_root(&self) -> Hash256 { self.block_root diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index cd4609e1473..0ed624fc0db 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1,6 +1,7 @@ use crate::network_beacon_processor::NetworkBeaconProcessor; use crate::sync::manager::{BlockProcessType, SyncManager}; use crate::sync::peer_sampling::SamplingConfig; +use crate::sync::range_sync::RangeSyncType; use crate::sync::{SamplingId, SyncMessage}; use crate::NetworkMessage; use std::sync::Arc; @@ -78,6 +79,8 @@ struct TestRig { network_rx: mpsc::UnboundedReceiver>, /// Stores all `NetworkMessage`s received from `network_recv`. (e.g. outgoing RPC requests) network_rx_queue: Vec>, + /// Receiver for `SyncMessage` from the network + sync_rx: mpsc::UnboundedReceiver>, /// To send `SyncMessage`. For sending RPC responses or block processing results to sync. 
sync_manager: SyncManager, /// To manipulate sync state and peer connection status @@ -137,6 +140,7 @@ impl TestRig { let chain = harness.chain.clone(); let (network_tx, network_rx) = mpsc::unbounded_channel(); + let (sync_tx, sync_rx) = mpsc::unbounded_channel::>(); // TODO(das): make the generation of the ENR use the deterministic rng to have consistent // column assignments let network_config = Arc::new(NetworkConfig::default()); @@ -148,13 +152,12 @@ impl TestRig { )); let (beacon_processor, beacon_processor_rx) = NetworkBeaconProcessor::null_for_testing( globals, + sync_tx, chain.clone(), harness.runtime.task_executor.clone(), log.clone(), ); - let (_sync_send, sync_recv) = mpsc::unbounded_channel::>(); - let fork_name = chain.spec.fork_name_at_slot::(chain.slot().unwrap()); // All current tests expect synced and EL online state @@ -168,13 +171,15 @@ impl TestRig { beacon_processor_rx_queue: vec![], network_rx, network_rx_queue: vec![], + sync_rx, rng, network_globals: beacon_processor.network_globals.clone(), sync_manager: SyncManager::new( chain, network_tx, beacon_processor.into(), - sync_recv, + // Pass empty recv not tied to any tx + mpsc::unbounded_channel().1, SamplingConfig::Custom { required_successes: vec![SAMPLING_REQUIRED_SUCCESSES], }, @@ -237,6 +242,13 @@ impl TestRig { self.send_sync_message(SyncMessage::SampleBlock(block_root, block_slot)) } + /// Drain all sync messages in the sync_rx attached to the beacon processor + fn drain_sync_rx(&mut self) { + while let Ok(sync_message) = self.sync_rx.try_recv() { + self.send_sync_message(sync_message); + } + } + fn rand_block(&mut self) -> SignedBeaconBlock { self.rand_block_and_blobs(NumBlobs::None).0 } @@ -293,6 +305,10 @@ impl TestRig { self.sync_manager.active_parent_lookups().len() } + fn active_range_sync_chain(&self) -> (RangeSyncType, Slot, Slot) { + self.sync_manager.get_range_sync_chains().unwrap().unwrap() + } + fn assert_single_lookups_count(&self, count: usize) { assert_eq!( self.active_single_lookups_count(), @@ -1696,7 +1712,18 @@ fn test_parent_lookup_too_deep_grow_ancestor() { ) } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 1) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(chain_hash); } @@ -1723,7 +1750,18 @@ fn test_parent_lookup_too_deep_grow_tip() { ); } - rig.expect_penalty(peer_id, "chain_too_long"); + // Should create a new syncing chain + rig.drain_sync_rx(); + assert_eq!( + rig.active_range_sync_chain(), + ( + RangeSyncType::Head, + Slot::new(0), + Slot::new(PARENT_DEPTH_TOLERANCE as u64 - 2) + ) + ); + // Should not penalize peer, but network is not clear because of the blocks_by_range requests + rig.expect_no_penalty_for(peer_id); rig.assert_failed_chain(tip.canonical_root()); } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 708c4308b80..a2544b82b5f 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -94,6 +94,15 @@ pub enum SyncMessage { /// A useful peer has been discovered. AddPeer(PeerId, SyncInfo), + /// Force trigger range sync for a set of peers given a head they claim to have imported. Used + /// by block lookup to trigger range sync if a parent chain grows too large. 
+ AddPeersForceRangeSync { + peers: Vec, + head_root: Hash256, + /// Sync lookup may not know the Slot of this head. However this situation is very rare. + head_slot: Option, + }, + /// A block has been received from the RPC. RpcBlock { request_id: SyncRequestId, @@ -322,6 +331,13 @@ impl SyncManager { .collect() } + #[cfg(test)] + pub(crate) fn get_range_sync_chains( + &self, + ) -> Result, &'static str> { + self.range_sync.state() + } + #[cfg(test)] pub(crate) fn get_failed_chains(&mut self) -> Vec { self.block_lookups.get_failed_chains() @@ -376,11 +392,30 @@ impl SyncManager { let sync_type = remote_sync_type(&local, &remote, &self.chain); // update the state of the peer. - let should_add = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); - - if matches!(sync_type, PeerSyncType::Advanced) && should_add { - self.range_sync - .add_peer(&mut self.network, local, peer_id, remote); + let is_still_connected = self.update_peer_sync_state(&peer_id, &local, &remote, &sync_type); + if is_still_connected { + match sync_type { + PeerSyncType::Behind => {} // Do nothing + PeerSyncType::Advanced => { + self.range_sync + .add_peer(&mut self.network, local, peer_id, remote); + } + PeerSyncType::FullySynced => { + // Sync considers this peer close enough to the head to not trigger range sync. + // Range sync handles well syncing large ranges of blocks, of a least a few blocks. + // However this peer may be in a fork that we should sync but we have not discovered + // yet. If the head of the peer is unknown, attempt block lookup first. If the + // unknown head turns out to be on a longer fork, it will trigger range sync. + // + // A peer should always be considered `Advanced` if its finalized root is + // unknown and ahead of ours, so we don't check for that root here. + // + // TODO: This fork-choice check is potentially duplicated, review code + if !self.chain.block_is_known_to_fork_choice(&remote.head_root) { + self.handle_unknown_block_root(peer_id, remote.head_root); + } + } + } } self.update_sync_state(); @@ -391,6 +426,44 @@ impl SyncManager { } } + /// Trigger range sync for a set of peers that claim to have imported a head unknown to us. + fn add_peers_force_range_sync( + &mut self, + peers: &[PeerId], + head_root: Hash256, + head_slot: Option, + ) { + let status = self.chain.status_message(); + let local = SyncInfo { + head_slot: status.head_slot, + head_root: status.head_root, + finalized_epoch: status.finalized_epoch, + finalized_root: status.finalized_root, + }; + + let head_slot = head_slot.unwrap_or_else(|| { + debug!(self.log, + "On add peers force range sync assuming local head_slot"; + "local_head_slot" => local.head_slot, + "head_root" => ?head_root + ); + local.head_slot + }); + + let remote = SyncInfo { + head_slot, + head_root, + // Set finalized to same as local to trigger Head sync + finalized_epoch: local.finalized_epoch, + finalized_root: local.finalized_root, + }; + + for peer_id in peers { + self.range_sync + .add_peer(&mut self.network, local.clone(), *peer_id, remote.clone()); + } + } + /// Handles RPC errors related to requests that were emitted from the sync manager. fn inject_error(&mut self, peer_id: PeerId, request_id: SyncRequestId, error: RPCError) { trace!(self.log, "Sync manager received a failed RPC"); @@ -476,8 +549,7 @@ impl SyncManager { } /// Updates the syncing state of a peer. - /// Return whether the peer should be used for range syncing or not, according to its - /// connection status. 
+    /// Returns true if the peer is still connected and known to the peers DB.
     fn update_peer_sync_state(
         &mut self,
         peer_id: &PeerId,
@@ -686,6 +758,13 @@ impl<T: BeaconChainTypes> SyncManager<T> {
             SyncMessage::AddPeer(peer_id, info) => {
                 self.add_peer(peer_id, info);
             }
+            SyncMessage::AddPeersForceRangeSync {
+                peers,
+                head_root,
+                head_slot,
+            } => {
+                self.add_peers_force_range_sync(&peers, head_root, head_slot);
+            }
             SyncMessage::RpcBlock {
                 request_id,
                 peer_id,
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index 492b703f8a2..b67c0bf2dd8 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -7,6 +7,7 @@ pub use self::requests::{BlocksByRootSingleRequest, DataColumnsByRootSingleBlock
 use super::block_sidecar_coupling::RangeBlockComponentsRequest;
 use super::manager::BlockProcessType;
 use super::range_sync::{BatchId, ByRangeRequestType, ChainId};
+use super::SyncMessage;
 use crate::metrics;
 use crate::network_beacon_processor::NetworkBeaconProcessor;
 use crate::service::NetworkMessage;
@@ -249,6 +250,11 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
         }
     }

+    pub fn send_sync_message(&mut self, sync_message: SyncMessage<T::EthSpec>) {
+        self.network_beacon_processor
+            .send_sync_message(sync_message);
+    }
+
     /// Returns the ids of all the requests made to the given peer_id.
     pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Vec<SyncRequestId> {
         let failed_range_ids =
diff --git a/beacon_node/network/src/sync/range_sync/range.rs b/beacon_node/network/src/sync/range_sync/range.rs
index b88253c9e81..0ef99838dee 100644
--- a/beacon_node/network/src/sync/range_sync/range.rs
+++ b/beacon_node/network/src/sync/range_sync/range.rs
@@ -386,6 +386,7 @@ where
 #[cfg(test)]
 mod tests {
     use crate::network_beacon_processor::NetworkBeaconProcessor;
+    use crate::sync::SyncMessage;
     use crate::NetworkMessage;

     use super::*;
@@ -690,6 +691,7 @@
             log.new(o!("component" => "range")),
         );
         let (network_tx, network_rx) = mpsc::unbounded_channel();
+        let (sync_tx, _sync_rx) = mpsc::unbounded_channel::>();
         let network_config = Arc::new(NetworkConfig::default());
         let globals = Arc::new(NetworkGlobals::new_test_globals(
             Vec::new(),
@@ -700,6 +702,7 @@
         let (network_beacon_processor, beacon_processor_rx) =
             NetworkBeaconProcessor::null_for_testing(
                 globals.clone(),
+                sync_tx,
                 chain.clone(),
                 harness.runtime.task_executor.clone(),
                 log.clone(),

From 352a9cf054b442ae9140c0c399094ca264311dac Mon Sep 17 00:00:00 2001
From: Eitan Seri-Levi
Date: Wed, 9 Oct 2024 16:11:24 -0700
Subject: [PATCH 6/8] Add lockbud task to CI (#6470)

* Add lockbud task to CI

* Allow unknown lint

* Merge branch 'unstable' of https://github.com/sigp/lighthouse into lockbud

* remove potential deadlock

* ignore tokio util crate

* Update image
---
 .github/workflows/test-suite.yml                  | 15 +++++++++++++++
 beacon_node/eth1/src/service.rs                   |  3 ++-
 .../state_processing/src/consensus_context.rs     |  2 ++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml
index aff9a71b4ad..7cda3e477d6 100644
--- a/.github/workflows/test-suite.yml
+++ b/.github/workflows/test-suite.yml
@@ -54,6 +54,20 @@ jobs:
           done
           echo "skip_ci=$SKIP_CI" >> $GITHUB_OUTPUT

+  lockbud:
+    name: lockbud
+    runs-on: ubuntu-latest
+    container:
+      image: sigmaprime/lockbud:latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Install dependencies
+        run: apt update && apt install -y cmake
+      - name: Check for deadlocks
+        run: |
+          cargo lockbud -k deadlock -b -l tokio_util
+
   target-branch-check:
     name: target-branch-check
     runs-on: ubuntu-latest
@@ -433,6 +447,7 @@ jobs:
         'cargo-udeps',
         'compile-with-beta-compiler',
         'cli-check',
+        'lockbud',
       ]
     steps:
       - uses: actions/checkout@v4
diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs
index a70a927307d..71ab98a6a20 100644
--- a/beacon_node/eth1/src/service.rs
+++ b/beacon_node/eth1/src/service.rs
@@ -549,10 +549,11 @@ impl Service {

     /// Returns the number of deposits with valid signatures that have been observed.
     pub fn get_valid_signature_count(&self) -> Option<usize> {
+        let highest_safe_block = self.highest_safe_block()?;
         self.deposits()
             .read()
             .cache
-            .get_valid_signature_count(self.highest_safe_block()?)
+            .get_valid_signature_count(highest_safe_block)
     }

     /// Returns the number of deposits with valid signatures that have been observed, without
diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs
index b0eaf3422d3..0c176d4ab14 100644
--- a/consensus/state_processing/src/consensus_context.rs
+++ b/consensus/state_processing/src/consensus_context.rs
@@ -147,6 +147,8 @@ impl<E: EthSpec> ConsensusContext<E> {
         }
     }

+    #[allow(unknown_lints)]
+    #[allow(elided_named_lifetimes)]
     pub fn get_indexed_attestation<'a>(
         &'a mut self,
         state: &BeaconState<E>,

From 244a460e704184c0e0c356ce9dda20afd995a68a Mon Sep 17 00:00:00 2001
From: Lion - dapplion <35266934+dapplion@users.noreply.github.com>
Date: Thu, 10 Oct 2024 05:34:41 +0300
Subject: [PATCH 7/8] Bound min size of dynamic processor queues (#6466)

* Bound min size of dynamic processor queues

* Use max

* Add test
---
 beacon_node/beacon_processor/src/lib.rs | 33 +++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs
index cd5a1d6cff0..02c287b68e3 100644
--- a/beacon_node/beacon_processor/src/lib.rs
+++ b/beacon_node/beacon_processor/src/lib.rs
@@ -93,6 +93,11 @@ const DEFAULT_MAX_SCHEDULED_WORK_QUEUE_LEN: usize = 3 * DEFAULT_MAX_WORK_EVENT_Q
 /// slightly, we don't need to adjust the queues during the lifetime of a process.
 const ACTIVE_VALIDATOR_COUNT_OVERPROVISION_PERCENT: usize = 110;

+/// Minimum size of dynamically sized queues. Due to integer division we don't want 0 length queues
+/// as the processor won't process that message type. 128 is an arbitrary value >= 1 that
+/// seems reasonable.
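+/// As a worked example (illustrative numbers only): with 16 active validators and a 32-slot
+/// epoch, `active_validator_count / slots_per_epoch` is `16 / 32 == 0`, so without this floor the
+/// attestation queues below would be sized 0 and those messages would never be processed;
+/// `max(0, MIN_QUEUE_LEN)` keeps them at 128.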
+const MIN_QUEUE_LEN: usize = 128; + /// Maximum number of queued items that will be stored before dropping them pub struct BeaconProcessorQueueLengths { aggregate_queue: usize, @@ -155,9 +160,15 @@ impl BeaconProcessorQueueLengths { aggregate_queue: 4096, unknown_block_aggregate_queue: 1024, // Capacity for a full slot's worth of attestations if subscribed to all subnets - attestation_queue: active_validator_count / slots_per_epoch, + attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), // Capacity for a full slot's worth of attestations if subscribed to all subnets - unknown_block_attestation_queue: active_validator_count / slots_per_epoch, + unknown_block_attestation_queue: std::cmp::max( + active_validator_count / slots_per_epoch, + MIN_QUEUE_LEN, + ), sync_message_queue: 2048, sync_contribution_queue: 1024, gossip_voluntary_exit_queue: 4096, @@ -1686,3 +1697,21 @@ impl Drop for SendOnDrop { } } } + +#[cfg(test)] +mod tests { + use super::*; + use types::{BeaconState, ChainSpec, Eth1Data, ForkName, MainnetEthSpec}; + + #[test] + fn min_queue_len() { + // State with no validators. + let spec = ForkName::latest().make_genesis_spec(ChainSpec::mainnet()); + let genesis_time = 0; + let state = BeaconState::::new(genesis_time, Eth1Data::default(), &spec); + assert_eq!(state.validators().len(), 0); + let queue_lengths = BeaconProcessorQueueLengths::from_state(&state, &spec).unwrap(); + assert_eq!(queue_lengths.attestation_queue, MIN_QUEUE_LEN); + assert_eq!(queue_lengths.unknown_block_attestation_queue, MIN_QUEUE_LEN); + } +} From da290e8e2e9420a0d3f3a02012f052b90b5f6aab Mon Sep 17 00:00:00 2001 From: hopinheimer <48147533+hopinheimer@users.noreply.github.com> Date: Thu, 10 Oct 2024 07:32:41 -0400 Subject: [PATCH 8/8] Added required `--force-bls-withdrawal-credentials` description to `--disable-deposits` usage (#6436) * cli description * complied docs changes * reverted changes and script amended * fix * reverting unwanted changes * making linter happy * requested changes * Merge branch 'unstable' into cli-fix * Merge branch 'unstable' into cli-fix --- book/src/help_vm_create.md | 4 +++- scripts/cli.sh | 2 +- validator_manager/src/create_validators.rs | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/book/src/help_vm_create.md b/book/src/help_vm_create.md index 1803bb534c6..cde822e8946 100644 --- a/book/src/help_vm_create.md +++ b/book/src/help_vm_create.md @@ -133,7 +133,9 @@ Flags: When provided don't generate the deposits JSON file that is commonly used for submitting validator deposits via a web UI. Using this flag will save several seconds per validator if the user has an alternate - strategy for submitting deposits. + strategy for submitting deposits. If used, the + --force-bls-withdrawal-credentials is also required to ensure users + are aware that an --eth1-withdrawal-address is not set. --disable-log-timestamp If present, do not include timestamps in logging output. 
--disable-malloc-tuning diff --git a/scripts/cli.sh b/scripts/cli.sh index 6ca019b39e9..e43c05a834f 100755 --- a/scripts/cli.sh +++ b/scripts/cli.sh @@ -16,7 +16,7 @@ write_to_file() { printf "# %s\n\n\`\`\`\n%s\n\`\`\`" "$program" "$cmd" > "$file" # Adjust the width of the help text and append to the end of file - sed -i -e '$a\'$'\n''\n''' "$file" + printf "\n\n%s\n" "" >> "$file" } CMD=./target/release/lighthouse diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d06fce1d094..37a6040a9b0 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -112,7 +112,9 @@ pub fn cli_app() -> Command { "When provided don't generate the deposits JSON file that is \ commonly used for submitting validator deposits via a web UI. \ Using this flag will save several seconds per validator if the \ - user has an alternate strategy for submitting deposits.", + user has an alternate strategy for submitting deposits. \ + If used, the --force-bls-withdrawal-credentials is also required \ + to ensure users are aware that an --eth1-withdrawal-address is not set.", ) .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER)