Commit e3d386a: various todos and leftovers
michaelsutton committed Feb 18, 2025 (1 parent: 49fdcdc)
Showing 10 changed files with 100 additions and 63 deletions.
81 changes: 41 additions & 40 deletions consensus/core/src/config/params.rs
@@ -269,20 +269,14 @@ impl Params {
)
}

/// Returns the past median time sample rate,
/// depending on a selected parent DAA score
/// Returns the past median time sample rate
#[inline]
#[must_use]
pub fn past_median_time_sample_rate(&self, selected_parent_daa_score: u64) -> u64 {
if self.crescendo_activation.is_active(selected_parent_daa_score) {
self.crescendo.past_median_time_sample_rate
} else {
1
}
pub fn past_median_time_sample_rate(&self) -> ForkedParam<u64> {
ForkedParam::new(1, self.crescendo.past_median_time_sample_rate, self.crescendo_activation)
}
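For orientation, here is a minimal sketch of what the `ForkedParam` helper used throughout this commit presumably looks like, inferred solely from its call sites in this diff (`new`, `get`, `before`, `after`, `upper_bound`); the actual definition in `consensus/core/src/config/params.rs` may differ:

```rust
// Minimal sketch inferred from usage in this diff; not the actual definition.
#[derive(Clone, Copy)]
pub struct ForkActivation(u64); // DAA score at which the fork activates

impl ForkActivation {
    pub fn is_active(self, daa_score: u64) -> bool {
        daa_score >= self.0
    }
}

#[derive(Clone, Copy)]
pub struct ForkedParam<T: Copy> {
    before: T, // value before fork activation
    after: T,  // value after fork activation
    activation: ForkActivation,
}

impl<T: Copy> ForkedParam<T> {
    pub fn new(before: T, after: T, activation: ForkActivation) -> Self {
        Self { before, after, activation }
    }

    /// The value in effect at the given (typically selected parent) DAA score
    pub fn get(&self, daa_score: u64) -> T {
        if self.activation.is_active(daa_score) { self.after } else { self.before }
    }

    pub fn before(&self) -> T { self.before }
    pub fn after(&self) -> T { self.after }
}

impl<T: Copy + Ord> ForkedParam<T> {
    /// The larger of the two values, useful for conservative bounds
    pub fn upper_bound(&self) -> T { self.before.max(self.after) }
}
```

The refactor then replaces per-call-site `if activation.is_active(score) { new } else { old }` branches with a single `ForkedParam` value, which is the pattern repeated across the hunks below.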

/// Returns the size of the blocks window that is inspected to calculate the difficulty,
/// depending on a selected parent DAA score
/// Returns the size of the blocks window that is inspected to calculate the difficulty
#[inline]
#[must_use]
pub fn difficulty_window_size(&self) -> ForkedParam<usize> {
@@ -293,16 +287,11 @@ impl Params {
)
}

/// Returns the difficulty sample rate,
/// depending on a selected parent DAA score
/// Returns the difficulty sample rate
#[inline]
#[must_use]
pub fn difficulty_sample_rate(&self, selected_parent_daa_score: u64) -> u64 {
if self.crescendo_activation.is_active(selected_parent_daa_score) {
self.crescendo.difficulty_sample_rate
} else {
1
}
pub fn difficulty_sample_rate(&self) -> ForkedParam<u64> {
ForkedParam::new(1, self.crescendo.difficulty_sample_rate, self.crescendo_activation)
}

/// Returns the target time per block
@@ -347,48 +336,58 @@ impl Params {
ForkedParam::new(self.prior_pruning_depth, self.crescendo.pruning_depth, self.crescendo_activation)
}

// TODO (Crescendo)
pub fn finality_duration(&self) -> u64 {
self.prior_target_time_per_block * self.prior_finality_depth
pub fn finality_duration_in_milliseconds(&self) -> ForkedParam<u64> {
ForkedParam::new(
self.prior_target_time_per_block * self.prior_finality_depth,
self.crescendo.target_time_per_block * self.crescendo.finality_depth,
self.crescendo_activation,
)
}
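A quick, purely illustrative sanity check of the unit change (the rename makes explicit that the product of finality depth and target block time is in milliseconds); these numbers are assumptions for illustration, not values read from this diff:

```rust
// Hypothetical numbers, for illustration only:
// prior:     1000 ms blocks * 86_400 block finality depth  = 24 hours
// crescendo:  100 ms blocks * 864_000 block finality depth = 24 hours
let prior_ms: u64 = 1000 * 86_400;
let crescendo_ms: u64 = 100 * 864_000;
assert_eq!(prior_ms, crescendo_ms); // a 10x bps increase can preserve wall-clock finality duration
```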

// TODO (Crescendo)
pub fn daa_window_duration_in_blocks(&self, selected_parent_daa_score: u64) -> u64 {
if self.crescendo_activation.is_active(selected_parent_daa_score) {
self.crescendo.difficulty_sample_rate * self.crescendo.sampled_difficulty_window_size
} else {
self.prior_difficulty_window_size as u64
}
pub fn difficulty_window_duration_in_block_units(&self) -> ForkedParam<u64> {
ForkedParam::new(
self.prior_difficulty_window_size as u64,
self.crescendo.difficulty_sample_rate * self.crescendo.sampled_difficulty_window_size,
self.crescendo_activation,
)
}
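The renamed method also makes its unit explicit: on both sides of the fork the result is a number of blocks, where the post-fork sampled window spans `sample_rate * sampled_window_size` blocks. An illustrative (assumed, not authoritative) comparison:

```rust
// Purely illustrative numbers, not taken from this diff:
let prior_window_blocks: u64 = 2641; // legacy full window, one entry per block
let sample_rate: u64 = 4; // post-fork: one sample every 4 blocks
let sampled_window_size: u64 = 1001; // number of sampled entries
let new_window_blocks = sample_rate * sampled_window_size;
assert_eq!(new_window_blocks, 4004); // blocks spanned by the sampled window
```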

// TODO (Crescendo)
fn expected_daa_window_duration_in_milliseconds(&self, selected_parent_daa_score: u64) -> u64 {
if self.crescendo_activation.is_active(selected_parent_daa_score) {
self.crescendo.target_time_per_block
* self.crescendo.difficulty_sample_rate
* self.crescendo.sampled_difficulty_window_size
} else {
self.prior_target_time_per_block * self.prior_difficulty_window_size as u64
}
fn expected_difficulty_window_duration_in_milliseconds(&self) -> ForkedParam<u64> {
ForkedParam::new(
self.prior_target_time_per_block * self.prior_difficulty_window_size as u64,
self.crescendo.target_time_per_block
* self.crescendo.difficulty_sample_rate
* self.crescendo.sampled_difficulty_window_size,
self.crescendo_activation,
)
}

/// Returns the depth at which the anticone of a chain block is final (i.e., is a permanently closed set).
/// Based on the analysis at <https://github.com/kaspanet/docs/blob/main/Reference/prunality/Prunality.pdf>
/// and on the decomposition of merge depth (rule R-I therein) from finality depth (φ)
pub fn anticone_finalization_depth(&self) -> u64 {
// TODO (Crescendo)
let anticone_finalization_depth = self.prior_finality_depth
pub fn anticone_finalization_depth(&self) -> ForkedParam<u64> {
let prior_anticone_finalization_depth = self.prior_finality_depth
+ self.prior_merge_depth
+ 4 * self.prior_mergeset_size_limit * self.prior_ghostdag_k as u64
+ 2 * self.prior_ghostdag_k as u64
+ 2;

let new_anticone_finalization_depth = self.crescendo.finality_depth
+ self.crescendo.merge_depth
+ 4 * self.crescendo.mergeset_size_limit * self.crescendo.ghostdag_k as u64
+ 2 * self.crescendo.ghostdag_k as u64
+ 2;

// In mainnet it's guaranteed that `self.pruning_depth` is greater
// than `anticone_finalization_depth`, but for some tests we use
// a smaller (unsafe) pruning depth, so we return the minimum of
// the two to avoid a situation where a block can be pruned and
// not finalized.
min(self.prior_pruning_depth, anticone_finalization_depth)
ForkedParam::new(
min(self.prior_pruning_depth, prior_anticone_finalization_depth),
min(self.crescendo.pruning_depth, new_anticone_finalization_depth),
self.crescendo_activation,
)
}
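To make the depth formula concrete, a hypothetical evaluation with illustrative parameter values (assumed for this example only; actual network parameters may differ):

```rust
// Illustrative values only:
let finality_depth: u64 = 86_400;
let merge_depth: u64 = 3_600;
let mergeset_size_limit: u64 = 180;
let ghostdag_k: u64 = 18;

let anticone_finalization_depth = finality_depth
    + merge_depth
    + 4 * mergeset_size_limit * ghostdag_k
    + 2 * ghostdag_k
    + 2;
// 86_400 + 3_600 + 12_960 + 36 + 2
assert_eq!(anticone_finalization_depth, 102_998);
```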

/// Returns whether the sink timestamp is recent enough and the node is considered synced or nearly synced.
@@ -397,7 +396,9 @@ impl Params {
// We consider the node close to being synced if the sink (virtual selected parent) block
// timestamp is at most one DAA window duration in the past. Blocks mined over such a DAG state would
// enter the DAA window of fully-synced nodes and thus contribute to overall network difficulty
unix_now() < sink_timestamp + self.expected_daa_window_duration_in_milliseconds(sink_daa_score)
//
// [Crescendo]: the prior and new durations are nearly equal, so the choice between them here is negligible
unix_now() < sink_timestamp + self.expected_difficulty_window_duration_in_milliseconds().get(sink_daa_score)
} else {
// For testnets we consider the node to be synced if the sink timestamp is within a time range which
// is overwhelmingly unlikely to pass without mined blocks even if net hashrate decreased dramatically.
9 changes: 7 additions & 2 deletions consensus/src/consensus/mod.rs
@@ -1000,16 +1000,21 @@ impl ConsensusApi for Consensus {
self.validate_block_exists(hash)?;

// In order to guarantee the chain height is at least k, we check that the pruning point is not genesis.
if self.pruning_point() == self.config.genesis.hash {
let pruning_point = self.pruning_point();
if pruning_point == self.config.genesis.hash {
return Err(ConsensusError::UnexpectedPruningPoint);
}

// [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent
// DAA score is not important here, since we simply increase K one block earlier, which is more conservative (saving/sending more data)
let ghostdag_k = self.config.ghostdag_k().get(self.headers_store.get_daa_score(pruning_point).unwrap());

// Note: the method `get_ghostdag_chain_k_depth` might return a partial chain if data is missing.
// Ideally, a synced node would validate that it received all of the associated data up to k blocks
// back, and we could then assert that we actually got `k + 1` blocks. However, we choose to simply
// ignore this, since if the data were truly missing we wouldn't have accepted the staging consensus
// in the first place
Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash))
Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash, ghostdag_k))
}

fn create_block_locator_from_pruning_point(&self, high: Hash, limit: usize) -> ConsensusResult<Vec<Hash>> {
2 changes: 1 addition & 1 deletion consensus/src/consensus/services.rs
@@ -180,7 +180,7 @@ impl ConsensusServices {
params.genesis.hash,
params.pruning_proof_m,
params.anticone_finalization_depth(),
params.prior_ghostdag_k, // TODO (Crescendo)
params.ghostdag_k(),
is_consensus_exiting,
));

6 changes: 4 additions & 2 deletions consensus/src/processes/pruning_proof/build.rs
@@ -5,7 +5,7 @@ use kaspa_consensus_core::{
blockhash::{BlockHashExtensions, BlockHashes},
header::Header,
pruning::PruningPointProof,
BlockHashSet, BlockLevel, HashMapCustomHasher,
BlockHashSet, BlockLevel, HashMapCustomHasher, KType,
};
use kaspa_core::debug;
use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions, DB};
@@ -285,6 +285,7 @@ impl PruningProofManager {
&ghostdag_store,
Some(block_at_depth_m_at_next_level),
level,
self.ghostdag_k.get(pp_header.header.daa_score),
);

// Step 4 - Check if we actually have enough depth.
@@ -325,6 +326,7 @@
ghostdag_store: &Arc<DbGhostdagStore>,
required_block: Option<Hash>,
level: BlockLevel,
ghostdag_k: KType,
) -> bool {
let relations_service = RelationsStoreInFutureOfRoot {
relations_store: self.level_relations_services[level as usize].clone(),
@@ -333,7 +335,7 @@
};
let gd_manager = GhostdagManager::with_level(
root,
self.ghostdag_k,
ghostdag_k,
ghostdag_store.clone(),
relations_service.clone(),
self.headers_store.clone(),
27 changes: 18 additions & 9 deletions consensus/src/processes/pruning_proof/mod.rs
@@ -16,6 +16,7 @@ use rocksdb::WriteBatch;

use kaspa_consensus_core::{
blockhash::{self, BlockHashExtensions},
config::params::ForkedParam,
errors::consensus::{ConsensusError, ConsensusResult},
header::Header,
pruning::{PruningPointProof, PruningPointTrustedData},
@@ -121,8 +122,8 @@ pub struct PruningProofManager {
max_block_level: BlockLevel,
genesis_hash: Hash,
pruning_proof_m: u64,
anticone_finalization_depth: u64,
ghostdag_k: KType,
anticone_finalization_depth: ForkedParam<u64>,
ghostdag_k: ForkedParam<KType>,

is_consensus_exiting: Arc<AtomicBool>,
}
@@ -140,8 +141,8 @@ impl PruningProofManager {
max_block_level: BlockLevel,
genesis_hash: Hash,
pruning_proof_m: u64,
anticone_finalization_depth: u64,
ghostdag_k: KType,
anticone_finalization_depth: ForkedParam<u64>,
ghostdag_k: ForkedParam<KType>,
is_consensus_exiting: Arc<AtomicBool>,
) -> Self {
Self {
@@ -244,10 +245,10 @@
/// the search is halted and a partial chain is returned.
///
/// The returned hashes are guaranteed to have GHOSTDAG data
pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash) -> Vec<Hash> {
let mut hashes = Vec::with_capacity(self.ghostdag_k as usize + 1);
pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash, ghostdag_k: KType) -> Vec<Hash> {
let mut hashes = Vec::with_capacity(ghostdag_k as usize + 1);
let mut current = hash;
for _ in 0..=self.ghostdag_k {
for _ in 0..=ghostdag_k {
hashes.push(current);
let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else {
break;
@@ -275,6 +276,10 @@
let mut daa_window_blocks = BlockHashMap::new();
let mut ghostdag_blocks = BlockHashMap::new();

// [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent
// DAA score is not important here, since we simply increase K one block earlier, which is more conservative (saving/sending more data)
let ghostdag_k = self.ghostdag_k.get(self.headers_store.get_daa_score(pruning_point).unwrap());

// PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel)

for anticone_block in anticone.iter().copied() {
@@ -291,7 +296,7 @@
}
}

let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block);
let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block, ghostdag_k);
for hash in ghostdag_chain {
if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) {
let ghostdag = self.ghostdag_store.get_data(hash).unwrap();
@@ -369,8 +374,12 @@
let virtual_state = self.virtual_stores.read().state.get().unwrap();
let pp_bs = self.headers_store.get_blue_score(pp).unwrap();

// [Crescendo]: use the pruning point DAA score for activation. This means that only after sufficient time
// post activation will we require the increased finalization depth
let pruning_point_daa_score = self.headers_store.get_daa_score(pp).unwrap();

// The anticone is considered final only if the pruning point is at sufficient depth from virtual
if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth {
if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth.get(pruning_point_daa_score) {
let anticone = Arc::new(self.calculate_pruning_point_anticone_and_trusted_data(pp, virtual_state.parents.iter().copied()));
cache_lock.replace(CachedPruningPointData { pruning_point: pp, data: anticone.clone() });
Ok(anticone)
6 changes: 5 additions & 1 deletion consensus/src/processes/pruning_proof/validate.rs
@@ -173,6 +173,10 @@ impl PruningProofManager {
return Err(PruningImportError::PruningProofNotEnoughHeaders);
}

// [Crescendo]: decide on ghostdag K based on proof pruning point DAA score
let proof_pp_daa_score = proof[0].last().expect("checked if empty").daa_score;
let ghostdag_k = self.ghostdag_k.get(proof_pp_daa_score);

let headers_estimate = self.estimate_proof_unique_size(proof);

let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10));
@@ -199,7 +203,7 @@
.map(|(level, ghostdag_store)| {
GhostdagManager::with_level(
self.genesis_hash,
self.ghostdag_k,
ghostdag_k,
ghostdag_store,
relations_stores[level].clone(),
headers_store.clone(),
12 changes: 10 additions & 2 deletions protocol/flows/src/v5/ibd/flow.rs
@@ -194,10 +194,18 @@ impl IbdFlow {
// thing is that we eventually adjust to the longer period.
let pruning_depth = self.ctx.config.pruning_depth().get(hst_header.daa_score);
if relay_header.blue_score >= hst_header.blue_score + pruning_depth && relay_header.blue_work > hst_header.blue_work {
if unix_now() > consensus.async_creation_timestamp().await + self.ctx.config.finality_duration() {
// [Crescendo]: switch to the new *shorter* finality duration only after sufficient time has passed
// since activation (measured via the new *larger* finality depth).
// Note: these are not critical execution paths, so such estimation heuristics are acceptable in this context.
let finality_duration_in_milliseconds = self
.ctx
.config
.finality_duration_in_milliseconds()
.get(hst_header.daa_score.saturating_sub(self.ctx.config.finality_depth().upper_bound()));
if unix_now() > consensus.async_creation_timestamp().await + finality_duration_in_milliseconds {
let fp = consensus.async_finality_point().await;
let fp_ts = consensus.async_get_header(fp).await?.timestamp;
if unix_now() < fp_ts + self.ctx.config.finality_duration() * 3 / 2 {
if unix_now() < fp_ts + finality_duration_in_milliseconds * 3 / 2 {
// We reject the headers proof if the node has a relatively up-to-date finality point and current
// consensus has matured for long enough (and not recently synced). This is mostly a spam-protector
// since subsequent checks identify these violations as well
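The offset in the `.get(...)` call above implements a delayed switch: by querying the parameter at `daa_score - finality_depth.upper_bound()`, the post-fork (shorter) duration is only returned once activation is at least one full new-size finality depth in the past. A minimal sketch of this idiom (the helper name is ours, purely for illustration):

```rust
// Hypothetical helper illustrating the delayed-activation idiom used above:
// read a ForkedParam as of `delay` blocks ago, so the post-fork value only
// takes effect once the activation point is at least `delay` blocks deep.
fn delayed_get(param: ForkedParam<u64>, current_daa_score: u64, delay: u64) -> u64 {
    param.get(current_daa_score.saturating_sub(delay))
}
```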
10 changes: 8 additions & 2 deletions rpc/service/src/service.rs
@@ -318,8 +318,14 @@ impl RpcApi for RpcCoreService {

// A simple heuristic check which signals that the mined block is out of date
// and should not be accepted unless the user explicitly requests it
let daa_window_block_duration = self.config.daa_window_duration_in_blocks(virtual_daa_score);
if virtual_daa_score > daa_window_block_duration && block.header.daa_score < virtual_daa_score - daa_window_block_duration
//
// [Crescendo]: switch to the larger duration only after a full window with the new duration is reached post activation
let difficulty_window_duration = self
.config
.difficulty_window_duration_in_block_units()
.get(virtual_daa_score.saturating_sub(self.config.difficulty_window_duration_in_block_units().after()));
if virtual_daa_score > difficulty_window_duration
&& block.header.daa_score < virtual_daa_score - difficulty_window_duration
{
// error = format!("Block rejected. Reason: block DAA score {0} is too far behind virtual's DAA score {1}", block.header.daa_score, virtual_daa_score)
return Ok(SubmitBlockResponse { report: SubmitBlockReport::Reject(SubmitBlockRejectReason::BlockInvalid) });
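This is the same delayed-activation idiom as in the IBD flow above, here with the window's own post-fork span (`.after()`) as the delay, so the larger duration is used only once a full new-size window fits entirely after activation. A condensed sketch of the resulting guard (assumes this diff's helpers; not a verbatim copy):

```rust
// Condensed sketch of the staleness guard above; `after()` serves as the delay.
fn block_is_out_of_date(cfg: &Config, virtual_daa_score: u64, block_daa_score: u64) -> bool {
    let window = cfg.difficulty_window_duration_in_block_units();
    let dur = window.get(virtual_daa_score.saturating_sub(window.after()));
    virtual_daa_score > dur && block_daa_score < virtual_daa_score - dur
}
```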
2 changes: 1 addition & 1 deletion simpa/src/main.rs
@@ -330,7 +330,7 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) {
params.prior_finality_depth = 128;
params.prior_merge_depth = 128;
params.prior_mergeset_size_limit = 32;
params.prior_pruning_depth = params.anticone_finalization_depth();
params.prior_pruning_depth = params.anticone_finalization_depth().before();
info!("Setting pruning depth to {}", params.prior_pruning_depth);
}
}
8 changes: 5 additions & 3 deletions testing/integration/src/consensus_integration_tests.rs
@@ -587,7 +587,8 @@ async fn median_time_test() {
let consensus = TestConsensus::new(&test.config);
let wait_handles = consensus.init();

let num_blocks = test.config.past_median_time_window_size().before() as u64 * test.config.past_median_time_sample_rate(0);
let num_blocks =
test.config.past_median_time_window_size().before() as u64 * test.config.past_median_time_sample_rate().before();
let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance;
for i in 1..(num_blocks + 1) {
let parent = if i == 1 { test.config.genesis.hash } else { (i - 1).into() };
@@ -1367,7 +1368,8 @@ async fn difficulty_test() {
}

fn full_window_bits(consensus: &TestConsensus, hash: Hash) -> u32 {
let window_size = consensus.params().difficulty_window_size().before() * consensus.params().difficulty_sample_rate(0) as usize;
let window_size =
consensus.params().difficulty_window_size().before() * consensus.params().difficulty_sample_rate().before() as usize;
let ghostdag_data = &consensus.ghostdag_store().get_data(hash).unwrap();
let window = consensus.window_manager().block_window(ghostdag_data, WindowType::VaryingWindow(window_size)).unwrap();
assert_eq!(window.blocks.len(), window_size);
@@ -1453,7 +1455,7 @@ async fn difficulty_test() {
let consensus = TestConsensus::new(&test.config);
let wait_handles = consensus.init();

let sample_rate = test.config.difficulty_sample_rate(0);
let sample_rate = test.config.difficulty_sample_rate().before();
let expanded_window_size = test.config.difficulty_window_size().before() * sample_rate as usize;

let fake_genesis = Header {
Expand Down
