diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index c67caf07d4..cfca4057fc 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -10,6 +10,7 @@ use kaspa_consensus_core::{ daa_score_timestamp::DaaScoreTimestamp, errors::consensus::ConsensusResult, header::Header, + mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, @@ -191,14 +192,14 @@ impl ConsensusSessionOwned { self.consensus.validate_and_insert_trusted_block(tb) } - pub fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { + pub fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { // This method performs pure calculations so no need for an async wrapper - self.consensus.calculate_transaction_compute_mass(transaction) + self.consensus.calculate_transaction_non_contextual_masses(transaction) } - pub fn calculate_transaction_storage_mass(&self, transaction: &MutableTransaction) -> Option { + pub fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { // This method performs pure calculations so no need for an async wrapper - self.consensus.calculate_transaction_storage_mass(transaction) + self.consensus.calculate_transaction_contextual_masses(transaction) } pub fn get_virtual_daa_score(&self) -> u64 { diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index f8df0c0e14..0096d114d8 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -17,6 +17,7 @@ use crate::{ tx::TxResult, }, header::Header, + mass::{ContextualMasses, NonContextualMasses}, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, SignableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, @@ -90,11 +91,11 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { unimplemented!() } - fn calculate_transaction_storage_mass(&self, transaction: &MutableTransaction) -> Option { + fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { unimplemented!() } diff --git a/consensus/core/src/config/bps.rs b/consensus/core/src/config/bps.rs index 5e98aac5df..3ae170ef79 100644 --- a/consensus/core/src/config/bps.rs +++ b/consensus/core/src/config/bps.rs @@ -20,8 +20,8 @@ pub fn calculate_ghostdag_k(x: f64, delta: f64) -> u64 { } } -/// Bps-related constants generator for testnet 11 -pub type Testnet11Bps = Bps<10>; +/// Bps-related constants generator for 10-bps networks +pub type TenBps = Bps<10>; /// Struct representing network blocks-per-second. Provides a bunch of const functions /// computing various constants which are functions of the BPS value @@ -93,24 +93,21 @@ impl Bps { BPS * NEW_FINALITY_DURATION } - /// Limit used to previously calculate the pruning depth. 
- const fn prev_mergeset_size_limit() -> u64 { - Self::ghostdag_k() as u64 * 10 - } - pub const fn pruning_depth() -> u64 { // Based on the analysis at https://github.com/kaspanet/docs/blob/main/Reference/prunality/Prunality.pdf // and on the decomposition of merge depth (rule R-I therein) from finality depth (φ) // We add an additional merge depth unit as a safety margin for anticone finalization - Self::finality_depth() + let lower_bound = Self::finality_depth() + Self::merge_depth_bound() * 2 - + 4 * Self::prev_mergeset_size_limit() * Self::ghostdag_k() as u64 + + 4 * Self::mergeset_size_limit() * Self::ghostdag_k() as u64 + 2 * Self::ghostdag_k() as u64 - + 2 + + 2; - // TODO (HF or restart of TN11): - // Return `Self::finality_depth() * 3` and assert that this value is equal or larger than the above expression. - // This will give us a round easy number to track which is not sensitive to minor changes in other related params. + if lower_bound > BPS * NEW_PRUNING_DURATION { + lower_bound + } else { + BPS * NEW_PRUNING_DURATION + } } pub const fn pruning_proof_m() -> u64 { diff --git a/consensus/core/src/config/constants.rs b/consensus/core/src/config/constants.rs index 7818789339..02eabb7114 100644 --- a/consensus/core/src/config/constants.rs +++ b/consensus/core/src/config/constants.rs @@ -23,20 +23,15 @@ pub mod consensus { // ~~~~~~~~~~~~~~~~~~ Timestamp deviation & Median time ~~~~~~~~~~~~~~~~~~ // - /// **Legacy** timestamp deviation tolerance (seconds) - pub const LEGACY_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; - - /// **New** timestamp deviation tolerance (seconds). - /// TODO: KIP-0004: 605 (~10 minutes) - pub const NEW_TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; + /// Timestamp deviation tolerance (seconds) + pub const TIMESTAMP_DEVIATION_TOLERANCE: u64 = 132; /// The desired interval between samples of the median time window (seconds). - /// KIP-0004: 10 seconds pub const PAST_MEDIAN_TIME_SAMPLE_INTERVAL: u64 = 10; /// Size of the **sampled** median time window (independent of BPS) pub const MEDIAN_TIME_SAMPLED_WINDOW_SIZE: u64 = - (2 * NEW_TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); + (2 * TIMESTAMP_DEVIATION_TOLERANCE - 1).div_ceil(PAST_MEDIAN_TIME_SAMPLE_INTERVAL); // // ~~~~~~~~~~~~~~~~~~~~~~~~~ Max difficulty target ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -56,18 +51,21 @@ pub mod consensus { // ~~~~~~~~~~~~~~~~~~~ Difficulty Adjustment Algorithm (DAA) ~~~~~~~~~~~~~~~~~~~ // - /// Minimal size of the difficulty window. Affects the DA algorithm only at the starting period of a new net - pub const MIN_DIFFICULTY_WINDOW_LEN: usize = 10; + /// Minimal size of the difficulty window. Affects the DA algorithm at the starting period of a new net. + /// Also used during BPS fork transitions to stabilize the new rate before applying DA (see KIP-14). + /// With 4 seconds sampling interval, a value of 150 indicates 10 minutes of fixed + /// difficulty until the window grows large enough. + /// + /// TODO (crescendo): finalize + pub const MIN_DIFFICULTY_WINDOW_SIZE: usize = 150; /// **Legacy** difficulty adjustment window size corresponding to ~44 minutes with 1 BPS pub const LEGACY_DIFFICULTY_WINDOW_SIZE: usize = 2641; /// **New** difficulty window duration expressed in time units (seconds). - /// TODO: KIP-0004: 30,000 (500 minutes) pub const NEW_DIFFICULTY_WINDOW_DURATION: u64 = 2641; /// The desired interval between samples of the difficulty window (seconds). 
- /// TODO: KIP-0004: 30 seconds pub const DIFFICULTY_WINDOW_SAMPLE_INTERVAL: u64 = 4; /// Size of the **sampled** difficulty window (independent of BPS) @@ -81,9 +79,11 @@ pub mod consensus { pub const LEGACY_FINALITY_DEPTH: u64 = 86_400; /// **New** finality duration expressed in time units (seconds). - /// TODO: finalize this value (consider 6-24 hours) pub const NEW_FINALITY_DURATION: u64 = 43_200; // 12 hours + /// **New** pruning duration expressed in time units (seconds). + pub const NEW_PRUNING_DURATION: u64 = 108_000; // 30 hours + /// Merge depth bound duration (in seconds). For 1 BPS networks this equals the legacy depth /// bound in block units. For higher BPS networks this should be scaled up. /// @@ -166,7 +166,7 @@ pub mod perf { impl PerfParams { pub fn adjust_to_consensus_params(&mut self, consensus_params: &Params) { // Allow caching up to 10x over the baseline - self.block_data_cache_size *= consensus_params.bps().clamp(1, 10) as usize; + self.block_data_cache_size *= consensus_params.bps().upper_bound().clamp(1, 10) as usize; } } } diff --git a/consensus/core/src/config/genesis.rs b/consensus/core/src/config/genesis.rs index 9f9ea21e54..06d1431ed2 100644 --- a/consensus/core/src/config/genesis.rs +++ b/consensus/core/src/config/genesis.rs @@ -225,7 +225,7 @@ pub const DEVNET_GENESIS: GenesisBlock = GenesisBlock { #[cfg(test)] mod tests { use super::*; - use crate::{config::bps::Testnet11Bps, merkle::calc_hash_merkle_root}; + use crate::{config::bps::TenBps, merkle::calc_hash_merkle_root}; #[test] fn test_genesis_hashes() { @@ -238,7 +238,7 @@ mod tests { #[test] fn gen_testnet11_genesis() { - let bps = Testnet11Bps::bps(); + let bps = TenBps::bps(); let mut genesis = TESTNET_GENESIS; let target = kaspa_math::Uint256::from_compact_target_bits(genesis.bits); let scaled_target = target * bps / 100; diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index b0ab02e98e..634d9e3c15 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -1,5 +1,5 @@ pub use super::{ - bps::{Bps, Testnet11Bps}, + bps::{Bps, TenBps}, constants::consensus::*, genesis::{GenesisBlock, DEVNET_GENESIS, GENESIS, SIMNET_GENESIS, TESTNET11_GENESIS, TESTNET_GENESIS}, }; @@ -19,16 +19,26 @@ use std::{ pub struct ForkActivation(u64); impl ForkActivation { + const NEVER: u64 = u64::MAX; + const ALWAYS: u64 = 0; + pub const fn new(daa_score: u64) -> Self { Self(daa_score) } pub const fn never() -> Self { - Self(u64::MAX) + Self(Self::NEVER) } pub const fn always() -> Self { - Self(0) + Self(Self::ALWAYS) + } + + /// Returns the actual DAA score triggering the activation. Should be used only + /// for cases where the explicit value is required for computations (e.g., coinbase subsidy). 
+ /// Otherwise, **activation checks should always go through `self.is_active(..)`** + pub fn daa_score(self) -> u64 { + self.0 } pub fn is_active(self, current_daa_score: u64) -> bool { @@ -42,6 +52,137 @@ impl ForkActivation { } } +/// A consensus parameter which depends on forking activation +#[derive(Clone, Copy, Debug)] +pub struct ForkedParam { + pre: T, + post: T, + activation: ForkActivation, +} + +impl ForkedParam { + const fn new(pre: T, post: T, activation: ForkActivation) -> Self { + Self { pre, post, activation } + } + + pub const fn new_const(val: T) -> Self { + Self { pre: val, post: val, activation: ForkActivation::never() } + } + + pub fn activation(&self) -> ForkActivation { + self.activation + } + + pub fn get(&self, daa_score: u64) -> T { + if self.activation.is_active(daa_score) { + self.post + } else { + self.pre + } + } + + /// Returns the value before activation (=pre unless activation = always) + pub fn before(&self) -> T { + match self.activation.0 { + ForkActivation::ALWAYS => self.post, + _ => self.pre, + } + } + + /// Returns the permanent long-term value after activation (=post unless the activation is never scheduled) + pub fn after(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + _ => self.post, + } + } + + /// Maps the ForkedParam to a new ForkedParam by applying a map function on both pre and post + pub fn map U>(&self, f: F) -> ForkedParam { + ForkedParam::new(f(self.pre), f(self.post), self.activation) + } +} + +impl ForkedParam { + /// Returns the min of `pre` and `post` values. Useful for non-consensus initializations + /// which require knowledge of the value bounds. + /// + /// Note that if activation is not scheduled (set to never) then pre is always returned, + /// and if activation is set to always (since inception), post will be returned. + pub fn lower_bound(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + ForkActivation::ALWAYS => self.post, + _ => self.pre.min(self.post), + } + } + + /// Returns the max of `pre` and `post` values. Useful for non-consensus initializations + /// which require knowledge of the value bounds. + /// + /// Note that if activation is not scheduled (set to never) then pre is always returned, + /// and if activation is set to always (since inception), post will be returned. 
+ pub fn upper_bound(&self) -> T { + match self.activation.0 { + ForkActivation::NEVER => self.pre, + ForkActivation::ALWAYS => self.post, + _ => self.pre.max(self.post), + } + } +} + +/// Fork params for the Crescendo hardfork +#[derive(Clone, Debug)] +pub struct CrescendoParams { + pub past_median_time_sampled_window_size: u64, + pub sampled_difficulty_window_size: u64, + + /// Target time per block (in milliseconds) + pub target_time_per_block: u64, + pub ghostdag_k: KType, + + pub past_median_time_sample_rate: u64, + pub difficulty_sample_rate: u64, + + pub max_block_parents: u8, + pub mergeset_size_limit: u64, + pub merge_depth: u64, + pub finality_depth: u64, + pub pruning_depth: u64, + + pub max_tx_inputs: usize, + pub max_tx_outputs: usize, + pub max_signature_script_len: usize, + pub max_script_public_key_len: usize, + + pub coinbase_maturity: u64, +} + +pub const CRESCENDO: CrescendoParams = CrescendoParams { + past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, + sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE, + + // + // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ + // + target_time_per_block: TenBps::target_time_per_block(), + ghostdag_k: TenBps::ghostdag_k(), + past_median_time_sample_rate: TenBps::past_median_time_sample_rate(), + difficulty_sample_rate: TenBps::difficulty_adjustment_sample_rate(), + max_block_parents: TenBps::max_block_parents(), + mergeset_size_limit: TenBps::mergeset_size_limit(), + merge_depth: TenBps::merge_depth_bound(), + finality_depth: TenBps::finality_depth(), + pruning_depth: TenBps::pruning_depth(), + coinbase_maturity: TenBps::coinbase_maturity(), + + // TODO (crescendo): finalize all below + max_tx_inputs: 1000, + max_tx_outputs: 1000, + max_signature_script_len: 100_000, + max_script_public_key_len: 10_000, +}; + /// Consensus parameters. Contains settings and configurations which are consensus-sensitive. /// Changing one of these on a network node would exclude and prevent it from reaching consensus /// with the other unmodified nodes. 
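A minimal usage sketch (illustrative, not part of the patch) of the `ForkedParam` accessors introduced above, showing how callers resolve a fork-dependent value. The activation score and the surrounding function are hypothetical; only the public methods shown in this hunk (`get`, `upper_bound`) and the `Params` getters added later in the patch are assumed.

// Illustrative sketch only. Assumes a `Params` instance whose `crescendo_activation`
// is scheduled at `activation_score` (hypothetical value).
fn forked_param_usage(params: &Params, activation_score: u64) {
    // Consensus code resolves the value by the relevant DAA score:
    let k_before = params.ghostdag_k().get(activation_score.saturating_sub(1)); // -> prior_ghostdag_k
    let k_after = params.ghostdag_k().get(activation_score);                    // -> crescendo.ghostdag_k
    // Non-consensus initialization that needs a single figure uses the bounds,
    // e.g. cache sizing later in this patch uses `params.bps().upper_bound()`:
    let max_bps = params.bps().upper_bound();
    let _ = (k_before, k_after, max_bps);
}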
@@ -50,25 +191,13 @@ pub struct Params { pub dns_seeders: &'static [&'static str], pub net: NetworkId, pub genesis: GenesisBlock, - pub ghostdag_k: KType, + pub prior_ghostdag_k: KType, - /// Legacy timestamp deviation tolerance (in seconds) - pub legacy_timestamp_deviation_tolerance: u64, - - /// New timestamp deviation tolerance (in seconds, activated with sampling) - pub new_timestamp_deviation_tolerance: u64, - - /// Block sample rate for filling the past median time window (selects one every N blocks) - pub past_median_time_sample_rate: u64, - - /// Size of sampled blocks window that is inspected to calculate the past median time of each block - pub past_median_time_sampled_window_size: u64, + /// Timestamp deviation tolerance (in seconds) + pub timestamp_deviation_tolerance: u64, /// Target time per block (in milliseconds) - pub target_time_per_block: u64, - - /// DAA score from which the window sampling starts for difficulty and past median time calculation - pub sampling_activation: ForkActivation, + pub prior_target_time_per_block: u64, /// Defines the highest allowed proof of work difficulty value for a block as a [`Uint256`] pub max_difficulty_target: Uint256, @@ -76,23 +205,18 @@ pub struct Params { /// Highest allowed proof of work difficulty as a floating number pub max_difficulty_target_f64: f64, - /// Block sample rate for filling the difficulty window (selects one every N blocks) - pub difficulty_sample_rate: u64, - - /// Size of sampled blocks window that is inspected to calculate the required difficulty of each block - pub sampled_difficulty_window_size: usize, - /// Size of full blocks window that is inspected to calculate the required difficulty of each block - pub legacy_difficulty_window_size: usize, + pub prior_difficulty_window_size: usize, - /// The minimum length a difficulty window (full or sampled) must have to trigger a DAA calculation - pub min_difficulty_window_len: usize, + /// The minimum size a difficulty window (full or sampled) must have to trigger a DAA calculation + pub min_difficulty_window_size: usize, + + pub prior_max_block_parents: u8, + pub prior_mergeset_size_limit: u64, + pub prior_merge_depth: u64, + pub prior_finality_depth: u64, + pub prior_pruning_depth: u64, - pub max_block_parents: u8, - pub mergeset_size_limit: u64, - pub merge_depth: u64, - pub finality_depth: u64, - pub pruning_depth: u64, pub coinbase_payload_script_public_key_max_len: u8, pub max_coinbase_payload_len: usize, pub max_tx_inputs: usize, @@ -104,24 +228,9 @@ pub struct Params { pub mass_per_sig_op: u64, pub max_block_mass: u64, - /// The parameter for scaling inverse KAS value to mass units (unpublished KIP-0009) + /// The parameter for scaling inverse KAS value to mass units (KIP-0009) pub storage_mass_parameter: u64, - /// DAA score from which storage mass calculation and transaction mass field are activated as a consensus rule - pub storage_mass_activation: ForkActivation, - - /// DAA score from which tx engine: - /// 1. Supports 8-byte integer arithmetic operations (previously limited to 4 bytes) - /// 2. 
Supports transaction introspection opcodes: - /// - OpTxInputCount (0xb3): Get number of inputs - /// - OpTxOutputCount (0xb4): Get number of outputs - /// - OpTxInputIndex (0xb9): Get current input index - /// - OpTxInputAmount (0xbe): Get input amount - /// - OpTxInputSpk (0xbf): Get input script public key - /// - OpTxOutputAmount (0xc2): Get output amount - /// - OpTxOutputSpk (0xc3): Get output script public key - pub kip10_activation: ForkActivation, - /// DAA score after which the pre-deflationary period switches to the deflationary period pub deflationary_phase_daa_score: u64, @@ -131,9 +240,8 @@ pub struct Params { pub max_block_level: BlockLevel, pub pruning_proof_m: u64, - /// Activation rules for when to enable using the payload field in transactions - pub payload_activation: ForkActivation, - pub runtime_sig_op_counting: ForkActivation, + pub crescendo: CrescendoParams, + pub crescendo_activation: ForkActivation, } fn unix_now() -> u64 { @@ -144,116 +252,135 @@ impl Params { /// Returns the size of the full blocks window that is inspected to calculate the past median time (legacy) #[inline] #[must_use] - pub fn legacy_past_median_time_window_size(&self) -> usize { - (2 * self.legacy_timestamp_deviation_tolerance - 1) as usize + pub fn prior_past_median_time_window_size(&self) -> usize { + (2 * self.timestamp_deviation_tolerance - 1) as usize } /// Returns the size of the sampled blocks window that is inspected to calculate the past median time #[inline] #[must_use] pub fn sampled_past_median_time_window_size(&self) -> usize { - self.past_median_time_sampled_window_size as usize + self.crescendo.past_median_time_sampled_window_size as usize } - /// Returns the size of the blocks window that is inspected to calculate the past median time, - /// depending on a selected parent DAA score + /// Returns the size of the blocks window that is inspected to calculate the past median time. 
#[inline] #[must_use] - pub fn past_median_time_window_size(&self, selected_parent_daa_score: u64) -> usize { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.sampled_past_median_time_window_size() - } else { - self.legacy_past_median_time_window_size() - } + pub fn past_median_time_window_size(&self) -> ForkedParam { + ForkedParam::new( + self.prior_past_median_time_window_size(), + self.sampled_past_median_time_window_size(), + self.crescendo_activation, + ) } - /// Returns the timestamp deviation tolerance, - /// depending on a selected parent DAA score + /// Returns the past median time sample rate #[inline] #[must_use] - pub fn timestamp_deviation_tolerance(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.new_timestamp_deviation_tolerance - } else { - self.legacy_timestamp_deviation_tolerance - } + pub fn past_median_time_sample_rate(&self) -> ForkedParam { + ForkedParam::new(1, self.crescendo.past_median_time_sample_rate, self.crescendo_activation) } - /// Returns the past median time sample rate, - /// depending on a selected parent DAA score + /// Returns the size of the blocks window that is inspected to calculate the difficulty #[inline] #[must_use] - pub fn past_median_time_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.past_median_time_sample_rate - } else { - 1 - } + pub fn difficulty_window_size(&self) -> ForkedParam { + ForkedParam::new( + self.prior_difficulty_window_size, + self.crescendo.sampled_difficulty_window_size as usize, + self.crescendo_activation, + ) } - /// Returns the size of the blocks window that is inspected to calculate the difficulty, - /// depending on a selected parent DAA score + /// Returns the difficulty sample rate #[inline] #[must_use] - pub fn difficulty_window_size(&self, selected_parent_daa_score: u64) -> usize { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.sampled_difficulty_window_size - } else { - self.legacy_difficulty_window_size - } + pub fn difficulty_sample_rate(&self) -> ForkedParam { + ForkedParam::new(1, self.crescendo.difficulty_sample_rate, self.crescendo_activation) } - /// Returns the difficulty sample rate, - /// depending on a selected parent DAA score + /// Returns the target time per block #[inline] #[must_use] - pub fn difficulty_sample_rate(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.difficulty_sample_rate - } else { - 1 - } + pub fn target_time_per_block(&self) -> ForkedParam { + ForkedParam::new(self.prior_target_time_per_block, self.crescendo.target_time_per_block, self.crescendo_activation) } - /// Returns the target time per block, - /// depending on a selected parent DAA score + /// Returns the expected number of blocks per second #[inline] #[must_use] - pub fn target_time_per_block(&self, _selected_parent_daa_score: u64) -> u64 { - self.target_time_per_block + pub fn bps(&self) -> ForkedParam { + ForkedParam::new( + 1000 / self.prior_target_time_per_block, + 1000 / self.crescendo.target_time_per_block, + self.crescendo_activation, + ) } - /// Returns the expected number of blocks per second - #[inline] - #[must_use] - pub fn bps(&self) -> u64 { - 1000 / self.target_time_per_block + pub fn ghostdag_k(&self) -> ForkedParam { + ForkedParam::new(self.prior_ghostdag_k, self.crescendo.ghostdag_k, self.crescendo_activation) } - 
pub fn daa_window_duration_in_blocks(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 - } else { - self.legacy_difficulty_window_size as u64 - } + pub fn max_block_parents(&self) -> ForkedParam { + ForkedParam::new(self.prior_max_block_parents, self.crescendo.max_block_parents, self.crescendo_activation) } - fn expected_daa_window_duration_in_milliseconds(&self, selected_parent_daa_score: u64) -> u64 { - if self.sampling_activation.is_active(selected_parent_daa_score) { - self.target_time_per_block * self.difficulty_sample_rate * self.sampled_difficulty_window_size as u64 - } else { - self.target_time_per_block * self.legacy_difficulty_window_size as u64 - } + pub fn mergeset_size_limit(&self) -> ForkedParam { + ForkedParam::new(self.prior_mergeset_size_limit, self.crescendo.mergeset_size_limit, self.crescendo_activation) + } + + pub fn merge_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_merge_depth, self.crescendo.merge_depth, self.crescendo_activation) + } + + pub fn finality_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_finality_depth, self.crescendo.finality_depth, self.crescendo_activation) + } + + pub fn pruning_depth(&self) -> ForkedParam { + ForkedParam::new(self.prior_pruning_depth, self.crescendo.pruning_depth, self.crescendo_activation) + } + + pub fn finality_duration_in_milliseconds(&self) -> ForkedParam { + ForkedParam::new( + self.prior_target_time_per_block * self.prior_finality_depth, + self.crescendo.target_time_per_block * self.crescendo.finality_depth, + self.crescendo_activation, + ) + } + + pub fn difficulty_window_duration_in_block_units(&self) -> ForkedParam { + ForkedParam::new( + self.prior_difficulty_window_size as u64, + self.crescendo.difficulty_sample_rate * self.crescendo.sampled_difficulty_window_size, + self.crescendo_activation, + ) + } + + fn expected_difficulty_window_duration_in_milliseconds(&self) -> ForkedParam { + ForkedParam::new( + self.prior_target_time_per_block * self.prior_difficulty_window_size as u64, + self.crescendo.target_time_per_block + * self.crescendo.difficulty_sample_rate + * self.crescendo.sampled_difficulty_window_size, + self.crescendo_activation, + ) } /// Returns the depth at which the anticone of a chain block is final (i.e., is a permanently closed set). /// Based on the analysis at /// and on the decomposition of merge depth (rule R-I therein) from finality depth (φ) - pub fn anticone_finalization_depth(&self) -> u64 { - let anticone_finalization_depth = self.finality_depth - + self.merge_depth - + 4 * self.mergeset_size_limit * self.ghostdag_k as u64 - + 2 * self.ghostdag_k as u64 + pub fn anticone_finalization_depth(&self) -> ForkedParam { + let prior_anticone_finalization_depth = self.prior_finality_depth + + self.prior_merge_depth + + 4 * self.prior_mergeset_size_limit * self.prior_ghostdag_k as u64 + + 2 * self.prior_ghostdag_k as u64 + + 2; + + let new_anticone_finalization_depth = self.crescendo.finality_depth + + self.crescendo.merge_depth + + 4 * self.crescendo.mergeset_size_limit * self.crescendo.ghostdag_k as u64 + + 2 * self.crescendo.ghostdag_k as u64 + 2; // In mainnet it's guaranteed that `self.pruning_depth` is greater @@ -261,7 +388,11 @@ impl Params { // a smaller (unsafe) pruning depth, so we return the minimum of // the two to avoid a situation where a block can be pruned and // not finalized. 
- min(self.pruning_depth, anticone_finalization_depth) + ForkedParam::new( + min(self.prior_pruning_depth, prior_anticone_finalization_depth), + min(self.crescendo.pruning_depth, new_anticone_finalization_depth), + self.crescendo_activation, + ) } /// Returns whether the sink timestamp is recent enough and the node is considered synced or nearly synced. @@ -270,7 +401,9 @@ impl Params { // We consider the node close to being synced if the sink (virtual selected parent) block // timestamp is within DAA window duration far in the past. Blocks mined over such DAG state would // enter the DAA window of fully-synced nodes and thus contribute to overall network difficulty - unix_now() < sink_timestamp + self.expected_daa_window_duration_in_milliseconds(sink_daa_score) + // + // [Crescendo]: both durations are nearly equal so this decision is negligible + unix_now() < sink_timestamp + self.expected_difficulty_window_duration_in_milliseconds().get(sink_daa_score) } else { // For testnets we consider the node to be synced if the sink timestamp is within a time range which // is overwhelmingly unlikely to pass without mined blocks even if net hashrate decreased dramatically. @@ -279,7 +412,8 @@ impl Params { // with significant testnet hashrate does not overwhelm the network with deep side-DAGs. // // We use DAA duration as baseline and scale it down with BPS (and divide by 3 for mining only when very close to current time on TN11) - let max_expected_duration_without_blocks_in_milliseconds = self.target_time_per_block * NEW_DIFFICULTY_WINDOW_DURATION / 3; // = DAA duration in milliseconds / bps / 3 + let max_expected_duration_without_blocks_in_milliseconds = + self.prior_target_time_per_block * NEW_DIFFICULTY_WINDOW_DURATION / 3; // = DAA duration in milliseconds / bps / 3 unix_now() < sink_timestamp + max_expected_duration_without_blocks_in_milliseconds } } @@ -299,10 +433,6 @@ impl Params { pub fn default_rpc_port(&self) -> u16 { self.net.default_rpc_port() } - - pub fn finality_duration(&self) -> u64 { - self.target_time_per_block * self.finality_depth - } } impl From for Params { @@ -322,7 +452,6 @@ impl From for Params { NetworkType::Mainnet => MAINNET_PARAMS, NetworkType::Testnet => match value.suffix { Some(10) => TESTNET_PARAMS, - Some(11) => TESTNET11_PARAMS, Some(x) => panic!("Testnet suffix {} is not supported", x), None => panic!("Testnet suffix not provided"), }, @@ -359,24 +488,18 @@ pub const MAINNET_PARAMS: Params = Params { ], net: NetworkId::new(NetworkType::Mainnet), genesis: GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as 
u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -395,8 +518,6 @@ pub const MAINNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -411,8 +532,8 @@ pub const MAINNET_PARAMS: Params = Params { max_block_level: 225, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), }; pub const TESTNET_PARAMS: Params = Params { @@ -426,24 +547,18 @@ pub const TESTNET_PARAMS: Params = Params { ], net: NetworkId::with_suffix(NetworkType::Testnet, 10), genesis: TESTNET_GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -462,8 +577,6 @@ pub const TESTNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. 
This number is calculated as follows: // We define a year as 365.25 days @@ -477,107 +590,36 @@ pub const TESTNET_PARAMS: Params = Params { max_block_level: 250, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), -}; - -pub const TESTNET11_PARAMS: Params = Params { - dns_seeders: &[ - // This DNS seeder is run by Tiram - "seeder1-testnet-11.kaspad.net", - // This DNS seeder is run by supertypo - "n-testnet-11.kaspa.ws", - // This DNS seeder is run by -gerri- - "dnsseeder-kaspa-testnet11.x-con.at", - // This DNS seeder is run by H@H - "ns-testnet11.kaspa-dnsseeder.net", - ], - net: NetworkId::with_suffix(NetworkType::Testnet, 11), - genesis: TESTNET11_GENESIS, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation: ForkActivation::always(), // Sampling is activated from network inception - max_difficulty_target: MAX_DIFFICULTY_TARGET, - max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - - // - // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ - // - ghostdag_k: Testnet11Bps::ghostdag_k(), - target_time_per_block: Testnet11Bps::target_time_per_block(), - past_median_time_sample_rate: Testnet11Bps::past_median_time_sample_rate(), - difficulty_sample_rate: Testnet11Bps::difficulty_adjustment_sample_rate(), - max_block_parents: Testnet11Bps::max_block_parents(), - mergeset_size_limit: Testnet11Bps::mergeset_size_limit(), - merge_depth: Testnet11Bps::merge_depth_bound(), - finality_depth: Testnet11Bps::finality_depth(), - pruning_depth: Testnet11Bps::pruning_depth(), - pruning_proof_m: Testnet11Bps::pruning_proof_m(), - deflationary_phase_daa_score: Testnet11Bps::deflationary_phase_daa_score(), - pre_deflationary_phase_base_subsidy: Testnet11Bps::pre_deflationary_phase_base_subsidy(), - coinbase_maturity: Testnet11Bps::coinbase_maturity(), - - coinbase_payload_script_public_key_max_len: 150, - max_coinbase_payload_len: 204, - - max_tx_inputs: 10_000, - max_tx_outputs: 10_000, - max_signature_script_len: 1_000_000, - max_script_public_key_len: 1_000_000, - - mass_per_tx_byte: 1, - mass_per_script_pub_key_byte: 10, - mass_per_sig_op: 1000, - max_block_mass: 500_000, - - storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::always(), - // Roughly at Dec 3, 2024 1800 UTC - kip10_activation: ForkActivation::new(287238000), - payload_activation: ForkActivation::new(287238000), - - skip_proof_of_work: false, - max_block_level: 250, - - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), }; pub const SIMNET_PARAMS: Params = Params { dns_seeders: &[], net: NetworkId::new(NetworkType::Simnet), genesis: SIMNET_GENESIS, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - sampling_activation: ForkActivation::always(), // Sampling is activated from network inception + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, max_difficulty_target: 
MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, // // ~~~~~~~~~~~~~~~~~~ BPS dependent constants ~~~~~~~~~~~~~~~~~~ // // Note we use a 10 BPS configuration for simnet - ghostdag_k: Testnet11Bps::ghostdag_k(), - target_time_per_block: Testnet11Bps::target_time_per_block(), - past_median_time_sample_rate: Testnet11Bps::past_median_time_sample_rate(), - difficulty_sample_rate: Testnet11Bps::difficulty_adjustment_sample_rate(), + prior_ghostdag_k: TenBps::ghostdag_k(), + prior_target_time_per_block: TenBps::target_time_per_block(), // For simnet, we deviate from TN11 configuration and allow at least 64 parents in order to support mempool benchmarks out of the box - max_block_parents: if Testnet11Bps::max_block_parents() > 64 { Testnet11Bps::max_block_parents() } else { 64 }, - mergeset_size_limit: Testnet11Bps::mergeset_size_limit(), - merge_depth: Testnet11Bps::merge_depth_bound(), - finality_depth: Testnet11Bps::finality_depth(), - pruning_depth: Testnet11Bps::pruning_depth(), - pruning_proof_m: Testnet11Bps::pruning_proof_m(), - deflationary_phase_daa_score: Testnet11Bps::deflationary_phase_daa_score(), - pre_deflationary_phase_base_subsidy: Testnet11Bps::pre_deflationary_phase_base_subsidy(), - coinbase_maturity: Testnet11Bps::coinbase_maturity(), + prior_max_block_parents: if TenBps::max_block_parents() > 64 { TenBps::max_block_parents() } else { 64 }, + prior_mergeset_size_limit: TenBps::mergeset_size_limit(), + prior_merge_depth: TenBps::merge_depth_bound(), + prior_finality_depth: TenBps::finality_depth(), + prior_pruning_depth: TenBps::pruning_depth(), + pruning_proof_m: TenBps::pruning_proof_m(), + deflationary_phase_daa_score: TenBps::deflationary_phase_daa_score(), + pre_deflationary_phase_base_subsidy: TenBps::pre_deflationary_phase_base_subsidy(), + coinbase_maturity: TenBps::coinbase_maturity(), coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -593,38 +635,30 @@ pub const SIMNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::always(), - kip10_activation: ForkActivation::never(), skip_proof_of_work: true, // For simnet only, PoW can be simulated by default max_block_level: 250, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::always(), }; pub const DEVNET_PARAMS: Params = Params { dns_seeders: &[], net: NetworkId::new(NetworkType::Devnet), genesis: DEVNET_GENESIS, - ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, - legacy_timestamp_deviation_tolerance: LEGACY_TIMESTAMP_DEVIATION_TOLERANCE, - new_timestamp_deviation_tolerance: NEW_TIMESTAMP_DEVIATION_TOLERANCE, - past_median_time_sample_rate: Bps::<1>::past_median_time_sample_rate(), - past_median_time_sampled_window_size: MEDIAN_TIME_SAMPLED_WINDOW_SIZE, - target_time_per_block: 1000, - sampling_activation: ForkActivation::never(), + prior_ghostdag_k: LEGACY_DEFAULT_GHOSTDAG_K, + timestamp_deviation_tolerance: TIMESTAMP_DEVIATION_TOLERANCE, + prior_target_time_per_block: 1000, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: 
MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: Bps::<1>::difficulty_adjustment_sample_rate(), - sampled_difficulty_window_size: DIFFICULTY_SAMPLED_WINDOW_SIZE as usize, - legacy_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, - min_difficulty_window_len: MIN_DIFFICULTY_WINDOW_LEN, - max_block_parents: 10, - mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, - merge_depth: 3600, - finality_depth: 86400, - pruning_depth: 185798, + prior_difficulty_window_size: LEGACY_DIFFICULTY_WINDOW_SIZE, + min_difficulty_window_size: MIN_DIFFICULTY_WINDOW_SIZE, + prior_max_block_parents: 10, + prior_mergeset_size_limit: (LEGACY_DEFAULT_GHOSTDAG_K as u64) * 10, + prior_merge_depth: 3600, + prior_finality_depth: 86400, + prior_pruning_depth: 185798, coinbase_payload_script_public_key_max_len: 150, max_coinbase_payload_len: 204, @@ -643,8 +677,6 @@ pub const DEVNET_PARAMS: Params = Params { max_block_mass: 500_000, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), // deflationary_phase_daa_score is the DAA score after which the pre-deflationary period // switches to the deflationary period. This number is calculated as follows: @@ -659,6 +691,6 @@ pub const DEVNET_PARAMS: Params = Params { max_block_level: 250, pruning_proof_m: 1000, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), }; diff --git a/consensus/core/src/constants.rs b/consensus/core/src/constants.rs index 450c12f678..54bb352011 100644 --- a/consensus/core/src/constants.rs +++ b/consensus/core/src/constants.rs @@ -15,6 +15,11 @@ pub const SOMPI_PER_KASPA: u64 = 100_000_000; /// The parameter for scaling inverse KAS value to mass units (KIP-0009) pub const STORAGE_MASS_PARAMETER: u64 = SOMPI_PER_KASPA * 10_000; +/// The parameter defining how much mass per byte to charge for when calculating +/// transient storage mass. Since normally the block mass limit is 500_000, this limits +/// block body byte size to 125_000 (KIP-0013). +pub const TRANSIENT_BYTE_TO_MASS_FACTOR: u64 = 4; + /// MaxSompi is the maximum transaction amount allowed in sompi. 
pub const MAX_SOMPI: u64 = 29_000_000_000 * SOMPI_PER_KASPA; diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index 132c6619f7..afe4bf11dc 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -97,14 +97,20 @@ pub enum RuleError { #[error("coinbase blue score of {0} is not the expected value of {1}")] BadCoinbasePayloadBlueScore(u64, u64), + #[error("coinbase mass commitment field is not zero")] + CoinbaseNonZeroMassCommitment, + #[error("transaction in isolation validation failed for tx {0}: {1}")] TxInIsolationValidationFailed(TransactionId, TxRuleError), - #[error("block exceeded mass limit of {0}")] - ExceedsMassLimit(u64), + #[error("block compute mass {0} exceeds limit of {1}")] + ExceedsComputeMassLimit(u64, u64), + + #[error("block transient storage mass {0} exceeds limit of {1}")] + ExceedsTransientMassLimit(u64, u64), - #[error("transaction {0} has mass field of {1} but mass should be at least {2}")] - MassFieldTooLow(TransactionId, u64, u64), + #[error("block persistent storage mass {0} exceeds limit of {1}")] + ExceedsStorageMassLimit(u64, u64), #[error("outpoint {0} is spent more than once on the same block")] DoubleSpendInSameBlock(TransactionOutpoint), diff --git a/consensus/core/src/hashing/tx.rs b/consensus/core/src/hashing/tx.rs index 9216a1c16e..f9cac0311a 100644 --- a/consensus/core/src/hashing/tx.rs +++ b/consensus/core/src/hashing/tx.rs @@ -18,8 +18,8 @@ pub fn hash(tx: &Transaction, include_mass_field: bool) -> Hash { /// Not intended for direct use by clients. Instead use `tx.id()` pub(crate) fn id(tx: &Transaction) -> TransactionId { - // Encode the transaction, replace signature script with zeroes, cut off - // payload and hash the result. + // Encode the transaction, replace signature script with an empty array, skip + // sigop counts and mass and hash the result. let encoding_flags = if tx.is_coinbase() { TX_ENCODING_FULL } else { TX_ENCODING_EXCLUDE_SIGNATURE_SCRIPT }; let mut hasher = kaspa_hashes::TransactionID::new(); @@ -43,8 +43,25 @@ fn write_transaction(hasher: &mut T, tx: &Transaction, encoding_flags hasher.update(tx.lock_time.to_le_bytes()).update(&tx.subnetwork_id).update(tx.gas.to_le_bytes()).write_var_bytes(&tx.payload); - // TODO: - // 1. Avoid passing a boolean and hash the mass only if > 0 (requires setting the mass to 0 on BBT). + /* + Design principles (mostly related to the new mass commitment field; see KIP-0009): + 1. The new mass field should not modify tx::id (since it is essentially a commitment by the miner re block space usage + so there is no need to modify the id definition which will require wide-spread changes in ecosystem software). + 2. Coinbase tx hash and id should ideally remain equal + + Solution: + 1. Hash the mass field only for tx::hash + 2. Hash the mass field only if mass > 0 + 3. Require in consensus that coinbase mass == 0 + + This way we have: + - Unique commitment for tx::hash per any possible mass value (with only zero being a no-op) + - tx::id remains unmodified + - Coinbase tx hash and id remain the same and equal + */ + + // TODO (post HF): + // 1. Avoid passing a boolean // 2. 
Use TxEncodingFlags to avoid including the mass for tx ID if include_mass_field { let mass = tx.mass(); diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs index 67bcc63aef..0bee5fd2b7 100644 --- a/consensus/core/src/mass/mod.rs +++ b/consensus/core/src/mass/mod.rs @@ -1,5 +1,6 @@ use crate::{ config::params::Params, + constants::TRANSIENT_BYTE_TO_MASS_FACTOR, subnets::SUBNETWORK_ID_SIZE, tx::{Transaction, TransactionInput, TransactionOutput, VerifiableTransaction}, }; @@ -57,6 +58,77 @@ pub fn transaction_output_estimated_serialized_size(output: &TransactionOutput) size } +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct NonContextualMasses { + /// Compute mass + pub compute_mass: u64, + + /// Transient storage mass + pub transient_mass: u64, +} + +impl NonContextualMasses { + pub fn new(compute_mass: u64, transient_mass: u64) -> Self { + Self { compute_mass, transient_mass } + } + + /// Returns the maximum over all non-contextual masses (currently compute and transient). This + /// max value has no consensus meaning and should only be used for mempool-level simplification + /// such as obtaining a one-dimensional mass value when composing blocks templates. + pub fn max(&self) -> u64 { + self.compute_mass.max(self.transient_mass) + } +} + +impl std::fmt::Display for NonContextualMasses { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "compute: {}, transient: {}", self.compute_mass, self.transient_mass) + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ContextualMasses { + /// Persistent storage mass + pub storage_mass: u64, +} + +impl ContextualMasses { + pub fn new(storage_mass: u64) -> Self { + Self { storage_mass } + } + + /// Returns the maximum over *all masses* (currently compute, transient and storage). This max + /// value has no consensus meaning and should only be used for mempool-level simplification such + /// as obtaining a one-dimensional mass value when composing blocks templates. + pub fn max(&self, non_contextual_masses: NonContextualMasses) -> u64 { + self.storage_mass.max(non_contextual_masses.max()) + } +} + +impl std::fmt::Display for ContextualMasses { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "storage: {}", self.storage_mass) + } +} + +impl std::cmp::PartialEq for ContextualMasses { + fn eq(&self, other: &u64) -> bool { + self.storage_mass.eq(other) + } +} + +pub type Mass = (NonContextualMasses, ContextualMasses); + +pub trait MassOps { + fn max(&self) -> u64; +} + +impl MassOps for Mass { + fn max(&self) -> u64 { + self.1.max(self.0) + } +} + // Note: consensus mass calculator operates on signed transactions. // To calculate mass for unsigned transactions, please use // `kaspa_wallet_core::tx::mass::MassCalculator` @@ -82,11 +154,12 @@ impl MassCalculator { } } - /// Calculates the compute mass of this transaction. This does not include the storage mass calculation below which - /// requires full UTXO context - pub fn calc_tx_compute_mass(&self, tx: &Transaction) -> u64 { + /// Calculates the non-contextual masses for this transaction (i.e., masses which can be calculated from + /// the transaction alone). These include compute and transient storage masses of this transaction. 
This + /// does not include the persistent storage mass calculation below which requires full UTXO context + pub fn calc_non_contextual_masses(&self, tx: &Transaction) -> NonContextualMasses { if tx.is_coinbase() { - return 0; + return NonContextualMasses::new(0, 0); } let size = transaction_estimated_serialized_size(tx); @@ -101,27 +174,26 @@ impl MassCalculator { let total_sigops: u64 = tx.inputs.iter().map(|input| input.sig_op_count as u64).sum(); let total_sigops_mass = total_sigops * self.mass_per_sig_op; - mass_for_size + total_script_public_key_mass + total_sigops_mass + let compute_mass = mass_for_size + total_script_public_key_mass + total_sigops_mass; + let transient_mass = size * TRANSIENT_BYTE_TO_MASS_FACTOR; + + NonContextualMasses::new(compute_mass, transient_mass) } - /// Calculates the storage mass for this populated transaction. + /// Calculates the contextual masses for this populated transaction. /// Assumptions which must be verified before this call: /// 1. All output values are non-zero /// 2. At least one input (unless coinbase) /// /// Otherwise this function should never fail. - pub fn calc_tx_storage_mass(&self, tx: &impl VerifiableTransaction) -> Option { + pub fn calc_contextual_masses(&self, tx: &impl VerifiableTransaction) -> Option { calc_storage_mass( tx.is_coinbase(), tx.populated_inputs().map(|(_, entry)| entry.amount), tx.outputs().iter().map(|out| out.value), self.storage_mass_parameter, ) - } - - /// Calculates the overall mass of this transaction, combining both compute and storage masses. - pub fn calc_tx_overall_mass(&self, tx: &impl VerifiableTransaction, cached_compute_mass: Option) -> Option { - self.calc_tx_storage_mass(tx).map(|mass| mass.max(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))) + .map(ContextualMasses::new) } } @@ -209,26 +281,26 @@ mod tests { // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) // - let storage_mass = MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Compounds from 3 to 2, with symmetric outputs and no fee, should be zero // Create asymmetry tx.tx.outputs[0].value = 50; tx.tx.outputs[1].value = 550; let storage_mass_parameter = 10u64.pow(12); - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, storage_mass_parameter / 50 + storage_mass_parameter / 550 - 3 * (storage_mass_parameter / 200)); // Create a tx with more outs than ins let base_value = 10_000 * SOMPI_PER_KASPA; let mut tx = generate_tx_from_amounts(&[base_value, base_value, base_value * 2], &[base_value; 4]); let storage_mass_parameter = STORAGE_MASS_PARAMETER; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 4); // Inputs are above C so they don't contribute negative mass, 4 outputs exactly equal C each charge 1 let mut tx2 = tx.clone(); tx2.tx.outputs[0].value = 10 * SOMPI_PER_KASPA; - let storage_mass = MassCalculator::new(0, 0, 0, 
storage_mass_parameter).calc_tx_storage_mass(&tx2.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx2.as_verifiable()).unwrap(); assert_eq!(storage_mass, 1003); // Increase values over the lim @@ -236,7 +308,7 @@ mod tests { out.value += 1 } tx.entries[0].as_mut().unwrap().amount += tx.tx.outputs.len() as u64; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Now create 2:2 transaction @@ -244,19 +316,19 @@ mod tests { let mut tx = generate_tx_from_amounts(&[100, 200], &[50, 250]); let storage_mass_parameter = 10u64.pow(12); - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 9000000000); // Set outputs to be equal to inputs tx.tx.outputs[0].value = 100; tx.tx.outputs[1].value = 200; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 0); // Remove an output and make sure the other is small enough to make storage mass greater than zero tx.tx.outputs.pop(); tx.tx.outputs[0].value = 50; - let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable()).unwrap(); + let storage_mass = MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_contextual_masses(&tx.as_verifiable()).unwrap(); assert_eq!(storage_mass, 5000000000); } diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index 2f81444b3c..e96a95e8fc 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -253,10 +253,9 @@ impl NetworkId { } pub fn iter() -> impl Iterator { - static NETWORK_IDS: [NetworkId; 5] = [ + static NETWORK_IDS: [NetworkId; 4] = [ NetworkId::new(NetworkType::Mainnet), NetworkId::with_suffix(NetworkType::Testnet, 10), - NetworkId::with_suffix(NetworkType::Testnet, 11), NetworkId::new(NetworkType::Devnet), NetworkId::new(NetworkType::Simnet), ]; diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 769d29452c..194e06f2ab 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -26,6 +26,7 @@ use std::{ }; use wasm_bindgen::prelude::*; +use crate::mass::{ContextualMasses, NonContextualMasses}; use crate::{ hashing, subnets::{self, SubnetworkId}, @@ -174,6 +175,7 @@ pub struct Transaction { #[serde(with = "serde_bytes")] pub payload: Vec, + /// Holds a commitment to the storage mass (KIP-0009) #[serde(default)] mass: TransactionMass, @@ -230,16 +232,18 @@ impl Transaction { self.id } - /// Set the mass field of this transaction. The mass field is expected depending on hard-forks which are currently - /// activated only on some testnets. The field has no effect on tx ID so no need to finalize following this call. + /// Set the storage mass commitment field of this transaction. This field is expected to be activated on mainnet + /// as part of the Crescendo hardfork. 
The field has no effect on tx ID so no need to finalize following this call. pub fn set_mass(&self, mass: u64) { self.mass.0.store(mass, SeqCst) } + /// Read the storage mass commitment pub fn mass(&self) -> u64 { self.mass.0.load(SeqCst) } + /// Set the storage mass commitment of the passed transaction pub fn with_mass(self, mass: u64) -> Self { self.set_mass(mass); self @@ -398,14 +402,14 @@ pub struct MutableTransaction = std::sync::Arc>, /// Populated fee pub calculated_fee: Option, - /// Populated compute mass (does not include the storage mass) - pub calculated_compute_mass: Option, + /// Populated non-contextual masses (does not include the storage mass) + pub calculated_non_contextual_masses: Option, } impl> MutableTransaction { pub fn new(tx: T) -> Self { let num_inputs = tx.as_ref().inputs.len(); - Self { tx, entries: vec![None; num_inputs], calculated_fee: None, calculated_compute_mass: None } + Self { tx, entries: vec![None; num_inputs], calculated_fee: None, calculated_non_contextual_masses: None } } pub fn id(&self) -> TransactionId { @@ -414,7 +418,7 @@ impl> MutableTransaction { pub fn with_entries(tx: T, entries: Vec) -> Self { assert_eq!(tx.as_ref().inputs.len(), entries.len()); - Self { tx, entries: entries.into_iter().map(Some).collect(), calculated_fee: None, calculated_compute_mass: None } + Self { tx, entries: entries.into_iter().map(Some).collect(), calculated_fee: None, calculated_non_contextual_masses: None } } /// Returns the tx wrapped as a [`VerifiableTransaction`]. Note that this function @@ -430,7 +434,7 @@ impl> MutableTransaction { } pub fn is_fully_populated(&self) -> bool { - self.is_verifiable() && self.calculated_fee.is_some() && self.calculated_compute_mass.is_some() + self.is_verifiable() && self.calculated_fee.is_some() && self.calculated_non_contextual_masses.is_some() } pub fn missing_outpoints(&self) -> impl Iterator + '_ { @@ -450,17 +454,14 @@ impl> MutableTransaction { } } - /// Returns the calculated feerate. The feerate is calculated as the amount of fee - /// this transactions pays per gram of the full contextual (compute & storage) mass. The - /// function returns a value when calculated fee exists and the contextual mass is greater - /// than zero, otherwise `None` is returned. + /// Returns the calculated feerate. The feerate is calculated as the amount of fee this + /// transactions pays per gram of the aggregated contextual mass (max over compute, transient + /// and storage masses). The function returns a value when calculated fee and calculated masses + /// exist, otherwise `None` is returned. pub fn calculated_feerate(&self) -> Option { - let contextual_mass = self.tx.as_ref().mass(); - if contextual_mass > 0 { - self.calculated_fee.map(|fee| fee as f64 / contextual_mass as f64) - } else { - None - } + self.calculated_non_contextual_masses + .map(|non_contextual_masses| ContextualMasses::new(self.tx.as_ref().mass()).max(non_contextual_masses)) + .and_then(|max_mass| self.calculated_fee.map(|fee| fee as f64 / max_mass as f64)) } /// A function for estimating the amount of memory bytes used by this transaction (dedicated to mempool usage). 
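Two small numeric sketches (illustrative, with made-up values) of the mass model used above: how the three mass dimensions collapse to the single value used for the feerate, and the arithmetic behind the 2:2 storage-mass test case asserting 9_000_000_000. They assume only the `NonContextualMasses` / `ContextualMasses` types from consensus/core/src/mass/mod.rs in this patch.

// Illustrative only; the mass and fee values are made up.
fn feerate_aggregation_sketch() {
    let non_contextual = NonContextualMasses::new(2_000, 3_200); // compute, transient
    let contextual = ContextualMasses::new(5_000);               // persistent storage (requires UTXO context)
    // Mempool-level one-dimensional mass: max over compute, transient and storage
    let max_mass = contextual.max(non_contextual);
    assert_eq!(max_mass, 5_000);
    let fee: u64 = 10_000;
    let feerate = fee as f64 / max_mass as f64; // fee paid per gram of the aggregated mass
    assert_eq!(feerate, 2.0);
}

// Reproduces the arithmetic of the 2:2 storage-mass test above
// (inputs [100, 200] -> outputs [50, 250], C = 10^12), for which the
// calculation reduces to sum(C / out) - sum(C / in):
fn storage_mass_2_2_sketch() {
    let c: u64 = 10u64.pow(12);
    let out_term: u64 = [50u64, 250].iter().map(|&o| c / o).sum();
    let in_term: u64 = [100u64, 200].iter().map(|&i| c / i).sum();
    assert_eq!(out_term.saturating_sub(in_term), 9_000_000_000);
}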
diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 21e5bf5573..61ba63c776 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -61,6 +61,7 @@ use kaspa_consensus_core::{ tx::TxResult, }, header::Header, + mass::{ContextualMasses, NonContextualMasses}, merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, @@ -443,13 +444,12 @@ impl ConsensusApi for Consensus { self.virtual_processor.populate_mempool_transactions_in_parallel(transactions) } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { - self.services.mass_calculator.calc_tx_compute_mass(transaction) + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { + self.services.mass_calculator.calc_non_contextual_masses(transaction) } - fn calculate_transaction_storage_mass(&self, _transaction: &MutableTransaction) -> Option { - // self.services.mass_calculator.calc_tx_storage_mass(&transaction.as_verifiable()) - unimplemented!("unsupported at the API level until KIP9 is finalized") + fn calculate_transaction_contextual_masses(&self, transaction: &MutableTransaction) -> Option { + self.services.mass_calculator.calc_contextual_masses(&transaction.as_verifiable()) } fn get_stats(&self) -> ConsensusStats { @@ -750,7 +750,7 @@ impl ConsensusApi for Consensus { } fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { - let storage_mass_activated = self.config.storage_mass_activation.is_active(pov_daa_score); + let storage_mass_activated = self.config.crescendo_activation.is_active(pov_daa_score); calc_hash_merkle_root(txs.iter(), storage_mass_activated) } @@ -813,7 +813,7 @@ impl ConsensusApi for Consensus { // max_blocks has to be greater than the merge set size limit fn get_hashes_between(&self, low: Hash, high: Hash, max_blocks: usize) -> ConsensusResult<(Vec, Hash)> { let _guard = self.pruning_lock.blocking_read(); - assert!(max_blocks as u64 > self.config.mergeset_size_limit); + assert!(max_blocks as u64 > self.config.mergeset_size_limit().upper_bound()); self.validate_block_exists(low)?; self.validate_block_exists(high)?; @@ -1000,16 +1000,21 @@ impl ConsensusApi for Consensus { self.validate_block_exists(hash)?; // In order to guarantee the chain height is at least k, we check that the pruning point is not genesis. - if self.pruning_point() == self.config.genesis.hash { + let pruning_point = self.pruning_point(); + if pruning_point == self.config.genesis.hash { return Err(ConsensusError::UnexpectedPruningPoint); } + // [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent + // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) + let ghostdag_k = self.config.ghostdag_k().get(self.headers_store.get_daa_score(pruning_point).unwrap()); + // Note: the method `get_ghostdag_chain_k_depth` might return a partial chain if data is missing. 
// Ideally this node when synced would validate it got all of the associated data up to k blocks // back and then we would be able to assert we actually got `k + 1` blocks, however we choose to // simply ignore, since if the data was truly missing we wouldn't accept the staging consensus in // the first place - Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash)) + Ok(self.services.pruning_proof_manager.get_ghostdag_chain_k_depth(hash, ghostdag_k)) } fn create_block_locator_from_pruning_point(&self, high: Hash, limit: usize) -> ConsensusResult> { diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 2ff7578e1d..47b1e88f23 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -40,7 +40,7 @@ pub type DbSyncManager = SyncManager< pub type DbPruningPointManager = PruningPointManager; -pub type DbBlockDepthManager = BlockDepthManager; +pub type DbBlockDepthManager = BlockDepthManager; pub type DbParentsManager = ParentsManager>; pub struct ConsensusServices { @@ -93,27 +93,29 @@ impl ConsensusServices { storage.block_window_cache_for_difficulty.clone(), storage.block_window_cache_for_past_median_time.clone(), params.max_difficulty_target, - params.target_time_per_block, - params.sampling_activation, - params.legacy_difficulty_window_size, - params.sampled_difficulty_window_size, - params.min_difficulty_window_len, - params.difficulty_sample_rate, - params.legacy_past_median_time_window_size(), + params.prior_target_time_per_block, + params.crescendo.target_time_per_block, + params.crescendo_activation, + params.prior_difficulty_window_size, + params.crescendo.sampled_difficulty_window_size as usize, + params.min_difficulty_window_size, + params.crescendo.difficulty_sample_rate, + params.prior_past_median_time_window_size(), params.sampled_past_median_time_window_size(), - params.past_median_time_sample_rate, + params.crescendo.past_median_time_sample_rate, ); let depth_manager = BlockDepthManager::new( - params.merge_depth, - params.finality_depth, + params.merge_depth(), + params.finality_depth(), params.genesis.hash, storage.depth_store.clone(), reachability_service.clone(), storage.ghostdag_store.clone(), + storage.headers_store.clone(), ); let ghostdag_manager = GhostdagManager::new( params.genesis.hash, - params.ghostdag_k, + params.ghostdag_k(), storage.ghostdag_store.clone(), relations_services[0].clone(), storage.headers_store.clone(), @@ -125,7 +127,7 @@ impl ConsensusServices { params.max_coinbase_payload_len, params.deflationary_phase_daa_score, params.pre_deflationary_phase_base_subsidy, - params.target_time_per_block, + params.bps(), ); let mass_calculator = MassCalculator::new( @@ -140,20 +142,16 @@ impl ConsensusServices { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, tx_script_cache_counters, mass_calculator.clone(), - params.storage_mass_activation, - params.kip10_activation, - params.payload_activation, - params.runtime_sig_op_counting, + params.crescendo_activation, ); let pruning_point_manager = PruningPointManager::new( - params.pruning_depth, - params.finality_depth, + params.pruning_depth(), + params.finality_depth(), params.genesis.hash, reachability_service.clone(), storage.ghostdag_store.clone(), @@ -182,12 +180,12 @@ impl ConsensusServices { params.genesis.hash, params.pruning_proof_m, params.anticone_finalization_depth(), - 
params.ghostdag_k, + params.ghostdag_k(), is_consensus_exiting, )); let sync_manager = SyncManager::new( - params.mergeset_size_limit as usize, + params.mergeset_size_limit(), reachability_service.clone(), dag_traversal_manager.clone(), storage.ghostdag_store.clone(), diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index 5e2ff8fcde..ed1ac9f18b 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -81,8 +81,9 @@ impl ConsensusStorage { let perf_params = &config.perf; // Lower and upper bounds - let pruning_depth = params.pruning_depth as usize; - let pruning_size_for_caches = (params.pruning_depth + params.finality_depth) as usize; // Upper bound for any block/header related data + // [Crescendo]: all usages of pruning upper bounds also bound by actual memory bytes, so we can safely use the larger values + let pruning_depth = params.pruning_depth().upper_bound() as usize; + let pruning_size_for_caches = pruning_depth + params.finality_depth().upper_bound() as usize; // Upper bound for any block/header related data let level_lower_bound = 2 * params.pruning_proof_m as usize; // Number of items lower bound for level-related caches // Budgets in bytes. All byte budgets overall sum up to ~1GB of memory (which obviously takes more low level alloc space) @@ -107,8 +108,10 @@ impl ConsensusStorage { let reachability_data_bytes = size_of::() + size_of::(); let ghostdag_compact_bytes = size_of::() + size_of::(); let headers_compact_bytes = size_of::() + size_of::(); - let difficulty_window_bytes = params.difficulty_window_size(0) * size_of::(); - let median_window_bytes = params.past_median_time_window_size(0) * size_of::(); + + // If the fork is already scheduled, prefer the long-term, permanent values + let difficulty_window_bytes = params.difficulty_window_size().after() * size_of::(); + let median_window_bytes = params.past_median_time_window_size().after() * size_of::(); // Cache policy builders let daa_excluded_builder = diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 08eb49f63b..44ed9f453d 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -1,7 +1,7 @@ use super::BlockBodyProcessor; use crate::{ errors::{BlockProcessResult, RuleError}, - model::stores::statuses::StatusesStoreReader, + model::stores::{ghostdag::GhostdagStoreReader, headers::HeaderStoreReader, statuses::StatusesStoreReader}, processes::{ transaction_validator::{ tx_validation_in_header_context::{LockTimeArg, LockTimeType}, @@ -10,7 +10,7 @@ use crate::{ window::WindowManager, }, }; -use kaspa_consensus_core::block::Block; +use kaspa_consensus_core::{block::Block, errors::tx::TxRuleError}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use once_cell::unsync::Lazy; @@ -19,6 +19,7 @@ use std::sync::Arc; impl BlockBodyProcessor { pub fn validate_body_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { self.check_parent_bodies_exist(block)?; + self.check_coinbase_outputs_limit(block)?; self.check_coinbase_blue_score_and_subsidy(block)?; self.check_block_transactions_in_context(block) } @@ -60,6 +61,32 @@ impl BlockBodyProcessor { Ok(()) } + fn check_coinbase_outputs_limit(&self, block: &Block) -> BlockProcessResult<()> { + // [Crescendo]: coinbase_outputs_limit depends on ghostdag k and thus depends on 
fork activation + // which makes it header contextual. + // + // TODO (post HF): move this check back to transaction in isolation validation + + // [Crescendo]: Ghostdag k activation is decided based on selected parent DAA score + // so we follow the same methodology for coinbase output limit (which is driven from the + // actual bound on the number of blue blocks in the mergeset). + // + // Note that body validation in context is not called for trusted blocks, so we can safely assume + // the selected parent exists and its daa score is accessible + let selected_parent = self.ghostdag_store.get_selected_parent(block.hash()).unwrap(); + let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + let coinbase_outputs_limit = self.ghostdag_k.get(selected_parent_daa_score) as u64 + 2; + + let tx = &block.transactions[0]; + if tx.outputs.len() as u64 > coinbase_outputs_limit { + return Err(RuleError::TxInIsolationValidationFailed( + tx.id(), + TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), coinbase_outputs_limit), + )); + } + Ok(()) + } + fn check_coinbase_blue_score_and_subsidy(self: &Arc, block: &Block) -> BlockProcessResult<()> { match self.coinbase_manager.deserialize_coinbase_payload(&block.transactions[0].payload) { Ok(data) => { diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index 4c6139846b..2afd80421b 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -2,17 +2,23 @@ use std::{collections::HashSet, sync::Arc}; use super::BlockBodyProcessor; use crate::errors::{BlockProcessResult, RuleError}; -use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root, tx::TransactionOutpoint}; +use kaspa_consensus_core::{ + block::Block, + mass::{ContextualMasses, Mass, NonContextualMasses}, + merkle::calc_hash_merkle_root, + tx::TransactionOutpoint, +}; impl BlockBodyProcessor { - pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { - let storage_mass_activated = self.storage_mass_activation.is_active(block.header.daa_score); + pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { + let crescendo_activated = self.crescendo_activation.is_active(block.header.daa_score); Self::check_has_transactions(block)?; - Self::check_hash_merkle_root(block, storage_mass_activated)?; + Self::check_hash_merkle_root(block, crescendo_activated)?; Self::check_only_one_coinbase(block)?; self.check_transactions_in_isolation(block)?; - let mass = self.check_block_mass(block, storage_mass_activated)?; + self.check_coinbase_has_zero_mass(block, crescendo_activated)?; + let mass = self.check_block_mass(block, crescendo_activated)?; self.check_duplicate_transactions(block)?; self.check_block_double_spends(block)?; self.check_no_chained_transactions(block)?; @@ -28,8 +34,8 @@ impl BlockBodyProcessor { Ok(()) } - fn check_hash_merkle_root(block: &Block, storage_mass_activated: bool) -> BlockProcessResult<()> { - let calculated = calc_hash_merkle_root(block.transactions.iter(), storage_mass_activated); + fn check_hash_merkle_root(block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { + let calculated = calc_hash_merkle_root(block.transactions.iter(), crescendo_activated); if calculated != block.header.hash_merkle_root { return Err(RuleError::BadMerkleRoot(block.header.hash_merkle_root, 
calculated)); } @@ -57,34 +63,56 @@ impl BlockBodyProcessor { Ok(()) } - fn check_block_mass(self: &Arc, block: &Block, storage_mass_activated: bool) -> BlockProcessResult { - let mut total_mass: u64 = 0; - if storage_mass_activated { + fn check_coinbase_has_zero_mass(&self, block: &Block, crescendo_activated: bool) -> BlockProcessResult<()> { + // TODO (post HF): move to check_coinbase_in_isolation + if crescendo_activated && block.transactions[0].mass() > 0 { + return Err(RuleError::CoinbaseNonZeroMassCommitment); + } + Ok(()) + } + + fn check_block_mass(self: &Arc, block: &Block, crescendo_activated: bool) -> BlockProcessResult { + if crescendo_activated { + let mut total_compute_mass: u64 = 0; + let mut total_transient_mass: u64 = 0; + let mut total_storage_mass: u64 = 0; for tx in block.transactions.iter() { - // This is only the compute part of the mass, the storage part cannot be computed here - let calculated_tx_compute_mass = self.mass_calculator.calc_tx_compute_mass(tx); - let committed_contextual_mass = tx.mass(); - // We only check the lower-bound here, a precise check of the mass commitment - // is done when validating the tx in context - if committed_contextual_mass < calculated_tx_compute_mass { - return Err(RuleError::MassFieldTooLow(tx.id(), committed_contextual_mass, calculated_tx_compute_mass)); + // Calculate the non-contextual masses + let NonContextualMasses { compute_mass, transient_mass } = self.mass_calculator.calc_non_contextual_masses(tx); + + // Read the storage mass commitment. This value cannot be computed here w/o UTXO context + // so we use the commitment. Later on, when the transaction is verified in context, we use + // the context to calculate the expected storage mass and verify it matches this commitment + let storage_mass_commitment = tx.mass(); + + // Sum over the various masses separately + total_compute_mass = total_compute_mass.saturating_add(compute_mass); + total_transient_mass = total_transient_mass.saturating_add(transient_mass); + total_storage_mass = total_storage_mass.saturating_add(storage_mass_commitment); + + // Verify all limits + if total_compute_mass > self.max_block_mass { + return Err(RuleError::ExceedsComputeMassLimit(total_compute_mass, self.max_block_mass)); } - // Sum over the committed masses - total_mass = total_mass.saturating_add(committed_contextual_mass); - if total_mass > self.max_block_mass { - return Err(RuleError::ExceedsMassLimit(self.max_block_mass)); + if total_transient_mass > self.max_block_mass { + return Err(RuleError::ExceedsTransientMassLimit(total_transient_mass, self.max_block_mass)); + } + if total_storage_mass > self.max_block_mass { + return Err(RuleError::ExceedsStorageMassLimit(total_storage_mass, self.max_block_mass)); } } + Ok((NonContextualMasses::new(total_compute_mass, total_transient_mass), ContextualMasses::new(total_storage_mass))) } else { + let mut total_mass: u64 = 0; for tx in block.transactions.iter() { - let calculated_tx_mass = self.mass_calculator.calc_tx_compute_mass(tx); - total_mass = total_mass.saturating_add(calculated_tx_mass); + let compute_mass = self.mass_calculator.calc_non_contextual_masses(tx).compute_mass; + total_mass = total_mass.saturating_add(compute_mass); if total_mass > self.max_block_mass { - return Err(RuleError::ExceedsMassLimit(self.max_block_mass)); + return Err(RuleError::ExceedsComputeMassLimit(total_mass, self.max_block_mass)); } } + Ok((NonContextualMasses::new(total_mass, 0), ContextualMasses::new(0))) } - Ok(total_mass) } fn check_block_double_spends(self: 
&Arc, block: &Block) -> BlockProcessResult<()> { @@ -415,7 +443,7 @@ mod tests { txs[1].inputs[0].sig_op_count = 255; txs[1].inputs[1].sig_op_count = 255; block.header.hash_merkle_root = calc_hash_merkle_root(txs.iter()); - assert_match!(body_processor.validate_body_in_isolation(&block.to_immutable()), Err(RuleError::ExceedsMassLimit(_))); + assert_match!(body_processor.validate_body_in_isolation(&block.to_immutable()), Err(RuleError::ExceedsComputeMassLimit(_, _))); let mut block = example_block.clone(); let txs = &mut block.transactions; diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 7bad12ce3f..4229ea263c 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -28,10 +28,11 @@ use kaspa_consensus_core::{ blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, config::{ genesis::GenesisBlock, - params::{ForkActivation, Params}, + params::{ForkActivation, ForkedParam, Params}, }, - mass::MassCalculator, + mass::{Mass, MassCalculator, MassOps}, tx::Transaction, + KType, }; use kaspa_consensus_notify::{ notification::{BlockAddedNotification, Notification}, @@ -59,6 +60,7 @@ pub struct BlockBodyProcessor { // Config pub(super) max_block_mass: u64, pub(super) genesis: GenesisBlock, + pub(super) ghostdag_k: ForkedParam, // Stores pub(super) statuses_store: Arc>, @@ -87,7 +89,7 @@ pub struct BlockBodyProcessor { counters: Arc, /// Storage mass hardfork DAA score - pub(crate) storage_mass_activation: ForkActivation, + pub(crate) crescendo_activation: ForkActivation, } impl BlockBodyProcessor { @@ -113,6 +115,7 @@ impl BlockBodyProcessor { max_block_mass: params.max_block_mass, genesis: params.genesis.clone(), + ghostdag_k: params.ghostdag_k(), statuses_store: storage.statuses_store.clone(), ghostdag_store: storage.ghostdag_store.clone(), @@ -130,7 +133,7 @@ impl BlockBodyProcessor { task_manager: BlockTaskDependencyManager::new(), notification_root, counters, - storage_mass_activation: params.storage_mass_activation, + crescendo_activation: params.crescendo_activation, } } @@ -217,11 +220,11 @@ impl BlockBodyProcessor { // Report counters self.counters.body_counts.fetch_add(1, Ordering::Relaxed); self.counters.txs_counts.fetch_add(block.transactions.len() as u64, Ordering::Relaxed); - self.counters.mass_counts.fetch_add(mass, Ordering::Relaxed); + self.counters.mass_counts.fetch_add(mass.max(), Ordering::Relaxed); Ok(BlockStatus::StatusUTXOPendingVerification) } - fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { + fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { let mass = self.validate_body_in_isolation(block)?; if !is_trusted { self.validate_body_in_context(block)?; diff --git a/consensus/src/pipeline/header_processor/post_pow_validation.rs b/consensus/src/pipeline/header_processor/post_pow_validation.rs index 6b12b4729c..d5f4cddb55 100644 --- a/consensus/src/pipeline/header_processor/post_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/post_pow_validation.rs @@ -11,7 +11,7 @@ impl HeaderProcessor { self.check_blue_score(ctx, header)?; self.check_blue_work(ctx, header)?; self.check_median_timestamp(ctx, header)?; - self.check_merge_size_limit(ctx)?; + self.check_mergeset_size_limit(ctx)?; self.check_bounded_merge_depth(ctx)?; self.check_pruning_point(ctx, header)?; self.check_indirect_parents(ctx, header) @@ -28,10 +28,11 @@ impl HeaderProcessor { 
Ok(()) } - pub fn check_merge_size_limit(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { + pub fn check_mergeset_size_limit(&self, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { let mergeset_size = ctx.ghostdag_data().mergeset_size() as u64; - if mergeset_size > self.mergeset_size_limit { - return Err(RuleError::MergeSetTooBig(mergeset_size, self.mergeset_size_limit)); + let mergeset_size_limit = self.mergeset_size_limit.get(ctx.selected_parent_daa_score()); + if mergeset_size > mergeset_size_limit { + return Err(RuleError::MergeSetTooBig(mergeset_size, mergeset_size_limit)); } Ok(()) } @@ -54,15 +55,23 @@ impl HeaderProcessor { pub fn check_indirect_parents(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { let expected_block_parents = self.parents_manager.calc_block_parents(ctx.pruning_point(), header.direct_parents()); + let crescendo_activated = self.crescendo_activation.is_active(ctx.selected_parent_daa_score()); if header.parents_by_level.len() != expected_block_parents.len() || !expected_block_parents.iter().enumerate().all(|(block_level, expected_level_parents)| { let header_level_parents = &header.parents_by_level[block_level]; if header_level_parents.len() != expected_level_parents.len() { return false; } - - let expected_set = HashSet::<&Hash>::from_iter(expected_level_parents); - header_level_parents.iter().all(|header_parent| expected_set.contains(header_parent)) + // Optimistic path where both arrays are identical also in terms of order + if header_level_parents == expected_level_parents { + return true; + } + if crescendo_activated { + HashSet::<&Hash>::from_iter(header_level_parents) == HashSet::<&Hash>::from_iter(expected_level_parents) + } else { + let expected_set = HashSet::<&Hash>::from_iter(expected_level_parents); + header_level_parents.iter().all(|header_parent| expected_set.contains(header_parent)) + } }) { return Err(RuleError::UnexpectedIndirectParents( diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index cce6411054..e153c18274 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -17,7 +17,7 @@ impl HeaderProcessor { pub(super) fn validate_header_in_isolation(&self, header: &Header) -> BlockProcessResult { self.check_header_version(header)?; self.check_block_timestamp_in_isolation(header)?; - self.check_parents_limit(header)?; + self.check_parents_limit_upper_bound(header)?; Self::check_parents_not_origin(header)?; self.check_pow_and_calc_block_level(header) } @@ -44,13 +44,16 @@ impl HeaderProcessor { Ok(()) } - fn check_parents_limit(&self, header: &Header) -> BlockProcessResult<()> { + fn check_parents_limit_upper_bound(&self, header: &Header) -> BlockProcessResult<()> { if header.direct_parents().is_empty() { return Err(RuleError::NoParents); } - if header.direct_parents().len() > self.max_block_parents as usize { - return Err(RuleError::TooManyParents(header.direct_parents().len(), self.max_block_parents as usize)); + // [Crescendo]: moved the tight parents limit check to pre_pow_validation since it requires selected parent DAA score info + // which is available only post ghostdag. We keep this upper bound check here since this method is applied to trusted blocks + // as well. 
+ if header.direct_parents().len() > self.max_block_parents.upper_bound() as usize { + return Err(RuleError::TooManyParents(header.direct_parents().len(), self.max_block_parents.upper_bound() as usize)); } Ok(()) diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index 7764e1c150..e896e4f5a3 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -6,11 +6,26 @@ use kaspa_consensus_core::header::Header; impl HeaderProcessor { pub(super) fn pre_pow_validation(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { + self.check_parents_limit(ctx, header)?; self.check_pruning_violation(ctx)?; self.check_difficulty_and_daa_score(ctx, header)?; Ok(()) } + // TODO (post HF): move back to pre_ghostdag_validation (substitute for check_parents_limit_upper_bound) + fn check_parents_limit(&self, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { + if header.direct_parents().is_empty() { + return Err(RuleError::NoParents); + } + + let max_block_parents = self.max_block_parents.get(ctx.selected_parent_daa_score()) as usize; + if header.direct_parents().len() > max_block_parents { + return Err(RuleError::TooManyParents(header.direct_parents().len(), max_block_parents)); + } + + Ok(()) + } + fn check_pruning_violation(&self, ctx: &HeaderProcessingContext) -> BlockProcessResult<()> { let known_parents = ctx.direct_known_parents(); diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index f467b6d975..8166754dbd 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -14,7 +14,7 @@ use crate::{ daa::DbDaaStore, depth::DbDepthStore, ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, - headers::DbHeadersStore, + headers::{DbHeadersStore, HeaderStoreReader}, headers_selected_tip::{DbHeadersSelectedTipStore, HeadersSelectedTipStoreReader}, pruning::{DbPruningStore, PruningPointInfo, PruningStoreReader}, reachability::{DbReachabilityStore, StagingReachabilityStore}, @@ -32,7 +32,10 @@ use itertools::Itertools; use kaspa_consensus_core::{ blockhash::{BlockHashes, ORIGIN}, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, - config::genesis::GenesisBlock, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, ForkedParam}, + }, header::Header, BlockHashSet, BlockLevel, }; @@ -56,6 +59,7 @@ pub struct HeaderProcessingContext { // Staging data pub ghostdag_data: Option>, + pub selected_parent_daa_score: Option, // [Crescendo] pub block_window_for_difficulty: Option>, pub block_window_for_past_median_time: Option>, pub mergeset_non_daa: Option, @@ -78,6 +82,7 @@ impl HeaderProcessingContext { pruning_info, known_parents, ghostdag_data: None, + selected_parent_daa_score: None, block_window_for_difficulty: None, mergeset_non_daa: None, block_window_for_past_median_time: None, @@ -101,6 +106,10 @@ impl HeaderProcessingContext { pub fn ghostdag_data(&self) -> &Arc { self.ghostdag_data.as_ref().unwrap() } + + pub fn selected_parent_daa_score(&self) -> u64 { + self.selected_parent_daa_score.unwrap() + } } pub struct HeaderProcessor { @@ -114,11 +123,11 @@ pub struct HeaderProcessor { // Config pub(super) genesis: GenesisBlock, pub(super) timestamp_deviation_tolerance: u64, - pub(super) target_time_per_block: u64, - 
pub(super) max_block_parents: u8, - pub(super) mergeset_size_limit: u64, + pub(super) max_block_parents: ForkedParam, + pub(super) mergeset_size_limit: ForkedParam, pub(super) skip_proof_of_work: bool, pub(super) max_block_level: BlockLevel, + pub(super) crescendo_activation: ForkActivation, // DB db: Arc, @@ -199,13 +208,13 @@ impl HeaderProcessor { task_manager: BlockTaskDependencyManager::new(), pruning_lock, counters, - // TODO (HF): make sure to also pass `new_timestamp_deviation_tolerance` and use according to HF activation score - timestamp_deviation_tolerance: params.timestamp_deviation_tolerance(0), - target_time_per_block: params.target_time_per_block, - max_block_parents: params.max_block_parents, - mergeset_size_limit: params.mergeset_size_limit, + + timestamp_deviation_tolerance: params.timestamp_deviation_tolerance, + max_block_parents: params.max_block_parents(), + mergeset_size_limit: params.mergeset_size_limit(), skip_proof_of_work: params.skip_proof_of_work, max_block_level: params.max_block_level, + crescendo_activation: params.crescendo_activation, } } @@ -298,6 +307,8 @@ impl HeaderProcessor { self.validate_parent_relations(header)?; let mut ctx = self.build_processing_context(header, block_level); self.ghostdag(&mut ctx); + // [Crescendo]: persist the selected parent DAA score to be used for activation checks + ctx.selected_parent_daa_score = Some(self.headers_store.get_daa_score(ctx.ghostdag_data().selected_parent).unwrap()); self.pre_pow_validation(&mut ctx, header)?; if let Err(e) = self.post_pow_validation(&mut ctx, header) { self.statuses_store.write().set(ctx.hash, StatusInvalid).unwrap(); diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 2de19c265d..f638918f27 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -169,7 +169,6 @@ impl PruningProcessor { let current_pruning_info = pruning_point_read.get().unwrap(); let (new_pruning_points, new_candidate) = self.pruning_point_manager.next_pruning_points_and_candidate_by_ghostdag_data( sink_ghostdag_data, - None, current_pruning_info.candidate, current_pruning_info.pruning_point, ); diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 21c347c4df..eb3c697069 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -53,7 +53,10 @@ use kaspa_consensus_core::{ block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, - config::{genesis::GenesisBlock, params::ForkActivation}, + config::{ + genesis::GenesisBlock, + params::{ForkActivation, ForkedParam}, + }, header::Header, merkle::calc_hash_merkle_root, pruning::PruningPointsList, @@ -112,9 +115,8 @@ pub struct VirtualStateProcessor { // Config pub(super) genesis: GenesisBlock, - pub(super) max_block_parents: u8, - pub(super) mergeset_size_limit: u64, - pub(super) pruning_depth: u64, + pub(super) max_block_parents: ForkedParam, + pub(super) mergeset_size_limit: ForkedParam, // Stores pub(super) statuses_store: Arc>, @@ -164,9 +166,8 @@ pub struct VirtualStateProcessor { // Counters counters: Arc, - // Storage mass hardfork DAA score - pub(crate) storage_mass_activation: ForkActivation, - pub(crate) kip10_activation: ForkActivation, + // Crescendo 
hardfork activation score (used here for activating KIPs 9,10) + pub(crate) crescendo_activation: ForkActivation, } impl VirtualStateProcessor { @@ -191,9 +192,8 @@ impl VirtualStateProcessor { thread_pool, genesis: params.genesis.clone(), - max_block_parents: params.max_block_parents, - mergeset_size_limit: params.mergeset_size_limit, - pruning_depth: params.pruning_depth, + max_block_parents: params.max_block_parents(), + mergeset_size_limit: params.mergeset_size_limit(), db, statuses_store: storage.statuses_store.clone(), @@ -230,8 +230,7 @@ impl VirtualStateProcessor { pruning_lock, notification_root, counters, - storage_mass_activation: params.storage_mass_activation, - kip10_activation: params.kip10_activation, + crescendo_activation: params.crescendo_activation, } } @@ -588,11 +587,11 @@ impl VirtualStateProcessor { /// Returns the max number of tips to consider as virtual parents in a single virtual resolve operation. /// /// Guaranteed to be `>= self.max_block_parents` - fn max_virtual_parent_candidates(&self) -> usize { + fn max_virtual_parent_candidates(&self, max_block_parents: usize) -> usize { // Limit to max_block_parents x 3 candidates. This way we avoid going over thousands of tips when the network isn't healthy. // There's no specific reason for a factor of 3, and its not a consensus rule, just an estimation for reducing the amount // of candidates considered. - self.max_block_parents as usize * 3 + max_block_parents * 3 } /// Searches for the next valid sink block (SINK = Virtual selected parent). The search is performed @@ -680,8 +679,10 @@ impl VirtualStateProcessor { // we might touch such data prior to validating the bounded merge rule. All in all, this function is short // enough so we avoid making further optimizations let _prune_guard = self.pruning_lock.blocking_read(); - let max_block_parents = self.max_block_parents as usize; - let max_candidates = self.max_virtual_parent_candidates(); + let selected_parent_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + let max_block_parents = self.max_block_parents.get(selected_parent_daa_score) as usize; + let mergeset_size_limit = self.mergeset_size_limit.get(selected_parent_daa_score); + let max_candidates = self.max_virtual_parent_candidates(max_block_parents); // Prioritize half the blocks with highest blue work and pick the rest randomly to ensure diversity between nodes if candidates.len() > max_candidates { @@ -710,10 +711,10 @@ impl VirtualStateProcessor { // Try adding parents as long as mergeset size and number of parents limits are not reached while let Some(candidate) = candidates.pop_front() { - if mergeset_size >= self.mergeset_size_limit || virtual_parents.len() >= max_block_parents { + if mergeset_size >= mergeset_size_limit || virtual_parents.len() >= max_block_parents { break; } - match self.mergeset_increase(&virtual_parents, candidate, self.mergeset_size_limit - mergeset_size) { + match self.mergeset_increase(&virtual_parents, candidate, mergeset_size_limit - mergeset_size) { MergesetIncreaseResult::Accepted { increase_size } => { mergeset_size += increase_size; virtual_parents.push(candidate); @@ -729,7 +730,7 @@ impl VirtualStateProcessor { } } } - assert!(mergeset_size <= self.mergeset_size_limit); + assert!(mergeset_size <= mergeset_size_limit); assert!(virtual_parents.len() <= max_block_parents); self.remove_bounded_merge_breaking_parents(virtual_parents, pruning_point) } @@ -1036,7 +1037,7 @@ impl VirtualStateProcessor { let parents_by_level = 
self.parents_manager.calc_block_parents(pruning_info.pruning_point, &virtual_state.parents); // Hash according to hardfork activation - let storage_mass_activated = self.storage_mass_activation.is_active(virtual_state.daa_score); + let storage_mass_activated = self.crescendo_activation.is_active(virtual_state.daa_score); let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); let accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(virtual_state.accepted_tx_ids.iter().copied()); diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index 20eea9e576..7972a90534 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -68,7 +68,7 @@ impl TestContext { pub fn build_block_template_row(&mut self, nonces: impl Iterator) -> &mut Self { for nonce in nonces { - self.simulated_time += self.consensus.params().target_time_per_block; + self.simulated_time += self.consensus.params().prior_target_time_per_block; self.current_templates.push_back(self.build_block_template(nonce as u64, self.simulated_time)); } self @@ -93,7 +93,7 @@ impl TestContext { pub async fn build_and_insert_disqualified_chain(&mut self, mut parents: Vec, len: usize) -> Hash { // The chain will be disqualified since build_block_with_parents builds utxo-invalid blocks for _ in 0..len { - self.simulated_time += self.consensus.params().target_time_per_block; + self.simulated_time += self.consensus.params().prior_target_time_per_block; let b = self.build_block_with_parents(parents, 0, self.simulated_time); parents = vec![b.header.hash]; self.validate_and_insert_block(b.to_immutable()).await; @@ -174,8 +174,8 @@ async fn antichain_merge_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; }) .build(); @@ -202,8 +202,8 @@ async fn basic_utxo_disqualified_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; }) .build(); @@ -234,9 +234,9 @@ async fn double_search_disqualified_test() { let config = ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.max_block_parents = 4; - p.mergeset_size_limit = 10; - p.min_difficulty_window_len = p.legacy_difficulty_window_size; + p.prior_max_block_parents = 4; + p.prior_mergeset_size_limit = 10; + p.min_difficulty_window_size = p.prior_difficulty_window_size; }) .build(); let mut ctx = TestContext::new(TestConsensus::new(&config)); diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index f0da0535ed..a3cbb820c1 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -107,27 +107,19 @@ impl VirtualStateProcessor { block_fee += validated_tx.calculated_fee; } - if is_selected_parent { + ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { + block_hash: merged_block, // For the selected parent, we prepend the coinbase tx - ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { - block_hash: merged_block, - accepted_transactions: once(AcceptedTxEntry { transaction_id: 
validated_coinbase_id, index_within_block: 0 }) - .chain( - validated_transactions - .into_iter() - .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }), - ) - .collect(), - }); - } else { - ctx.mergeset_acceptance_data.push(MergesetBlockAcceptanceData { - block_hash: merged_block, - accepted_transactions: validated_transactions - .into_iter() - .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }) - .collect(), - }); - } + accepted_transactions: is_selected_parent + .then_some(AcceptedTxEntry { transaction_id: validated_coinbase_id, index_within_block: 0 }) + .into_iter() + .chain( + validated_transactions + .into_iter() + .map(|(tx, tx_idx)| AcceptedTxEntry { transaction_id: tx.id(), index_within_block: tx_idx }), + ) + .collect(), + }); let coinbase_data = self.coinbase_manager.deserialize_coinbase_payload(&txs[0].payload).unwrap(); ctx.mergeset_rewards.insert( @@ -204,6 +196,10 @@ impl VirtualStateProcessor { .expected_coinbase_transaction(daa_score, miner_data, ghostdag_data, mergeset_rewards, mergeset_non_daa) .unwrap() .tx; + // [Crescendo]: we can pass include_mass_field = false here since post activation coinbase mass field + // is guaranteed to be zero (see check_coinbase_has_zero_mass), so after the fork we will be able to + // safely remove the include_mass_field parameter. This is because internally include_mass_field = false + // and mass = 0 are treated the same. if hashing::tx::hash(coinbase, false) != hashing::tx::hash(&expected_coinbase, false) { Err(BadCoinbaseTransaction) } else { @@ -325,18 +321,20 @@ impl VirtualStateProcessor { ) -> TxResult<()> { self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?; - // Calc the full contextual mass including storage mass + // Calc the contextual storage mass let contextual_mass = self .transaction_validator .mass_calculator - .calc_tx_overall_mass(&mutable_tx.as_verifiable(), mutable_tx.calculated_compute_mass) + .calc_contextual_masses(&mutable_tx.as_verifiable()) .ok_or(TxRuleError::MassIncomputable)?; // Set the inner mass field - mutable_tx.tx.set_mass(contextual_mass); + mutable_tx.tx.set_mass(contextual_mass.storage_mass); // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable - let mass_and_feerate_threshold = args.feerate_threshold.map(|threshold| (contextual_mass, threshold)); + let mass_and_feerate_threshold = args + .feerate_threshold + .map(|threshold| (contextual_mass.max(mutable_tx.calculated_non_contextual_masses.unwrap()), threshold)); let calculated_fee = self.transaction_validator.validate_populated_transaction_and_get_fee( &mutable_tx.as_verifiable(), pov_daa_score, diff --git a/consensus/src/processes/block_depth.rs b/consensus/src/processes/block_depth.rs index 24d948f708..5270824cd7 100644 --- a/consensus/src/processes/block_depth.rs +++ b/consensus/src/processes/block_depth.rs @@ -1,4 +1,4 @@ -use kaspa_consensus_core::blockhash::ORIGIN; +use kaspa_consensus_core::{blockhash::ORIGIN, config::params::ForkedParam}; use kaspa_hashes::Hash; use std::sync::Arc; @@ -7,42 +7,53 @@ use crate::model::{ stores::{ depth::DepthStoreReader, ghostdag::{GhostdagData, GhostdagStoreReader}, + headers::HeaderStoreReader, reachability::ReachabilityStoreReader, }, }; +enum BlockDepthType { + MergeRoot, + Finality, +} + #[derive(Clone)] -pub struct BlockDepthManager { - merge_depth: u64, - finality_depth: u64, +pub struct BlockDepthManager { + merge_depth: ForkedParam, + finality_depth: 
ForkedParam, genesis_hash: Hash, depth_store: Arc, reachability_service: MTReachabilityService, ghostdag_store: Arc, + headers_store: Arc, } -impl BlockDepthManager { +impl BlockDepthManager { pub fn new( - merge_depth: u64, - finality_depth: u64, + merge_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, depth_store: Arc, reachability_service: MTReachabilityService, ghostdag_store: Arc, + headers_store: Arc, ) -> Self { - Self { merge_depth, finality_depth, genesis_hash, depth_store, reachability_service, ghostdag_store } + Self { merge_depth, finality_depth, genesis_hash, depth_store, reachability_service, ghostdag_store, headers_store } } pub fn calc_merge_depth_root(&self, ghostdag_data: &GhostdagData, pruning_point: Hash) -> Hash { - self.calculate_block_at_depth(ghostdag_data, self.merge_depth, pruning_point) + self.calculate_block_at_depth(ghostdag_data, BlockDepthType::MergeRoot, pruning_point) } pub fn calc_finality_point(&self, ghostdag_data: &GhostdagData, pruning_point: Hash) -> Hash { - self.calculate_block_at_depth(ghostdag_data, self.finality_depth, pruning_point) + self.calculate_block_at_depth(ghostdag_data, BlockDepthType::Finality, pruning_point) } - fn calculate_block_at_depth(&self, ghostdag_data: &GhostdagData, depth: u64, pruning_point: Hash) -> Hash { - assert!(depth == self.merge_depth || depth == self.finality_depth); - + fn calculate_block_at_depth(&self, ghostdag_data: &GhostdagData, depth_type: BlockDepthType, pruning_point: Hash) -> Hash { + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let depth = match depth_type { + BlockDepthType::MergeRoot => self.merge_depth.get(selected_parent_daa_score), + BlockDepthType::Finality => self.finality_depth.get(selected_parent_daa_score), + }; if ghostdag_data.blue_score < depth { return self.genesis_hash; } @@ -57,10 +68,12 @@ impl Bl return ORIGIN; } - let mut current = if depth == self.merge_depth { - self.depth_store.merge_depth_root(ghostdag_data.selected_parent).unwrap() - } else { - self.depth_store.finality_point(ghostdag_data.selected_parent).unwrap() + // [Crescendo]: we start from the depth/finality point of the selected parent. This makes the selection monotonic + // also when the depth increases in the fork activation point. The loop below will simply not progress for a while, + // until a new block above the previous point reaches the *new increased depth*. + let mut current = match depth_type { + BlockDepthType::MergeRoot => self.depth_store.merge_depth_root(ghostdag_data.selected_parent).unwrap(), + BlockDepthType::Finality => self.depth_store.finality_point(ghostdag_data.selected_parent).unwrap(), }; // In this case we expect the pruning point or a block above it to be the block at depth. 
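[Editor's note] The block-depth changes above (and the coinbase changes below) both resolve consensus parameters through `ForkedParam` keyed by the selected parent's DAA score. The following is an illustrative re-implementation of that pattern under assumed semantics, included only to make the `get` / `upper_bound` usage concrete; the real types live in kaspa_consensus_core::config::params and may differ in detail.

// Minimal sketch of the assumed ForkedParam semantics: one value before the
// Crescendo activation score, another after it.
#[derive(Clone, Copy)]
struct ForkActivation(u64); // DAA score at which the fork activates (hypothetical wrapper)

impl ForkActivation {
    fn is_active(&self, daa_score: u64) -> bool {
        daa_score >= self.0
    }
}

#[derive(Clone, Copy)]
struct ForkedParam<T: Copy> {
    before: T,
    after: T,
    activation: ForkActivation,
}

impl<T: Copy + Ord> ForkedParam<T> {
    // Resolve the parameter for a given (selected parent) DAA score
    fn get(&self, daa_score: u64) -> T {
        if self.activation.is_active(daa_score) {
            self.after
        } else {
            self.before
        }
    }

    // Conservative bound usable for cache sizing and sanity asserts
    fn upper_bound(&self) -> T {
        self.before.max(self.after)
    }
}

fn main() {
    // Hypothetical values: a depth parameter growing 10x at an assumed activation score of 1_000_000
    let merge_depth = ForkedParam { before: 3_600u64, after: 36_000, activation: ForkActivation(1_000_000) };
    assert_eq!(merge_depth.get(999_999), 3_600);
    assert_eq!(merge_depth.get(1_000_000), 36_000);
    assert_eq!(merge_depth.upper_bound(), 36_000);
}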
diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index d67f922c81..36845af937 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -1,5 +1,6 @@ use kaspa_consensus_core::{ coinbase::*, + config::params::ForkedParam, errors::coinbase::{CoinbaseError, CoinbaseResult}, subnets, tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionOutput}, @@ -30,13 +31,16 @@ pub struct CoinbaseManager { max_coinbase_payload_len: usize, deflationary_phase_daa_score: u64, pre_deflationary_phase_base_subsidy: u64, - target_time_per_block: u64, + bps: ForkedParam, - /// Precomputed number of blocks per month - blocks_per_month: u64, + /// Precomputed subsidy by month tables (for before and after the Crescendo hardfork) + subsidy_by_month_table_before: SubsidyByMonthTable, + subsidy_by_month_table_after: SubsidyByMonthTable, - /// Precomputed subsidy by month table - subsidy_by_month_table: SubsidyByMonthTable, + /// The crescendo activation DAA score where BPS increased from 1 to 10. + /// This score is required here long-term (and not only for the actual forking), in + /// order to correctly determine the subsidy month from the live DAA score of the network + crescendo_activation_daa_score: u64, } /// Struct used to streamline payload parsing @@ -63,31 +67,31 @@ impl CoinbaseManager { max_coinbase_payload_len: usize, deflationary_phase_daa_score: u64, pre_deflationary_phase_base_subsidy: u64, - target_time_per_block: u64, + bps: ForkedParam, ) -> Self { - assert!(1000 % target_time_per_block == 0); - let bps = 1000 / target_time_per_block; - let blocks_per_month = SECONDS_PER_MONTH * bps; - // Precomputed subsidy by month table for the actual block per second rate // Here values are rounded up so that we keep the same number of rewarding months as in the original 1 BPS table. 
// In a 10 BPS network, the induced increase in total rewards is 51 KAS (see tests::calc_high_bps_total_rewards_delta()) - let subsidy_by_month_table: SubsidyByMonthTable = core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps)); + let subsidy_by_month_table_before: SubsidyByMonthTable = + core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps.before())); + let subsidy_by_month_table_after: SubsidyByMonthTable = + core::array::from_fn(|i| SUBSIDY_BY_MONTH_TABLE[i].div_ceil(bps.after())); Self { coinbase_payload_script_public_key_max_len, max_coinbase_payload_len, deflationary_phase_daa_score, pre_deflationary_phase_base_subsidy, - target_time_per_block, - blocks_per_month, - subsidy_by_month_table, + bps, + subsidy_by_month_table_before, + subsidy_by_month_table_after, + crescendo_activation_daa_score: bps.activation().daa_score(), } } #[cfg(test)] #[inline] - pub fn bps(&self) -> u64 { - 1000 / self.target_time_per_block + pub fn bps(&self) -> ForkedParam { + self.bps } pub fn expected_coinbase_transaction>( @@ -113,10 +117,24 @@ impl CoinbaseManager { // Collect all rewards from mergeset reds ∩ DAA window and create a // single output rewarding all to the current block (the "merging" block) let mut red_reward = 0u64; - for red in ghostdag_data.mergeset_reds.iter().filter(|h| !mergeset_non_daa.contains(h)) { - let reward_data = mergeset_rewards.get(red).unwrap(); - red_reward += reward_data.subsidy + reward_data.total_fees; + + // bps activation = crescendo activation + if self.bps.activation().is_active(daa_score) { + for red in ghostdag_data.mergeset_reds.iter() { + let reward_data = mergeset_rewards.get(red).unwrap(); + if mergeset_non_daa.contains(red) { + red_reward += reward_data.total_fees; + } else { + red_reward += reward_data.subsidy + reward_data.total_fees; + } + } + } else { + for red in ghostdag_data.mergeset_reds.iter().filter(|h| !mergeset_non_daa.contains(h)) { + let reward_data = mergeset_rewards.get(red).unwrap(); + red_reward += reward_data.subsidy + reward_data.total_fees; + } } + if red_reward > 0 { outputs.push(TransactionOutput::new(red_reward, miner_data.script_public_key.clone())); } @@ -214,13 +232,31 @@ impl CoinbaseManager { return self.pre_deflationary_phase_base_subsidy; } - let months_since_deflationary_phase_started = - ((daa_score - self.deflationary_phase_daa_score) / self.blocks_per_month) as usize; - if months_since_deflationary_phase_started >= self.subsidy_by_month_table.len() { - *(self.subsidy_by_month_table).last().unwrap() + let subsidy_month = self.subsidy_month(daa_score) as usize; + let subsidy_table = if self.bps.activation().is_active(daa_score) { + &self.subsidy_by_month_table_after } else { - self.subsidy_by_month_table[months_since_deflationary_phase_started] - } + &self.subsidy_by_month_table_before + }; + subsidy_table[subsidy_month.min(subsidy_table.len() - 1)] + } + + /// Get the subsidy month as function of the current DAA score. + /// + /// Note that this function is called only if daa_score >= self.deflationary_phase_daa_score + fn subsidy_month(&self, daa_score: u64) -> u64 { + let seconds_since_deflationary_phase_started = if self.crescendo_activation_daa_score < self.deflationary_phase_daa_score { + (daa_score - self.deflationary_phase_daa_score) / self.bps.after() + } else if daa_score < self.crescendo_activation_daa_score { + (daa_score - self.deflationary_phase_daa_score) / self.bps.before() + } else { + // Else - deflationary_phase <= crescendo_activation <= daa_score. 
+ // Count seconds differently before and after Crescendo activation + (self.crescendo_activation_daa_score - self.deflationary_phase_daa_score) / self.bps.before() + + (daa_score - self.crescendo_activation_daa_score) / self.bps.after() + }; + + seconds_since_deflationary_phase_started / SECONDS_PER_MONTH } #[cfg(test)] @@ -244,7 +280,7 @@ impl CoinbaseManager { /* This table was pre-calculated by calling `calcDeflationaryPeriodBlockSubsidyFloatCalc` (in kaspad-go) for all months until reaching 0 subsidy. To regenerate this table, run `TestBuildSubsidyTable` in coinbasemanager_test.go (note the `deflationaryPhaseBaseSubsidy` therein). - These values apply to 1 block per second. + These values represent the reward per second for each month (= reward per block for 1 BPS). */ #[rustfmt::skip] const SUBSIDY_BY_MONTH_TABLE: [u64; 426] = [ @@ -273,7 +309,7 @@ mod tests { use super::*; use crate::params::MAINNET_PARAMS; use kaspa_consensus_core::{ - config::params::{Params, TESTNET11_PARAMS}, + config::params::{Params, SIMNET_PARAMS}, constants::SOMPI_PER_KASPA, network::NetworkId, tx::scriptvec, @@ -286,13 +322,13 @@ mod tests { let legacy_cbm = create_legacy_manager(); let pre_deflationary_rewards = legacy_cbm.pre_deflationary_phase_base_subsidy * legacy_cbm.deflationary_phase_daa_score; let total_rewards: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| x * SECONDS_PER_MONTH).sum::(); - let testnet_11_bps = TESTNET11_PARAMS.bps(); + let testnet_11_bps = SIMNET_PARAMS.bps().upper_bound(); let total_high_bps_rewards_rounded_up: u64 = pre_deflationary_rewards + SUBSIDY_BY_MONTH_TABLE.iter().map(|x| (x.div_ceil(testnet_11_bps) * testnet_11_bps) * SECONDS_PER_MONTH).sum::(); - let cbm = create_manager(&TESTNET11_PARAMS); - let total_high_bps_rewards: u64 = - pre_deflationary_rewards + cbm.subsidy_by_month_table.iter().map(|x| x * cbm.blocks_per_month).sum::(); + let cbm = create_manager(&SIMNET_PARAMS); + let total_high_bps_rewards: u64 = pre_deflationary_rewards + + cbm.subsidy_by_month_table_before.iter().map(|x| x * SECONDS_PER_MONTH * cbm.bps().before()).sum::(); assert_eq!(total_high_bps_rewards_rounded_up, total_high_bps_rewards, "subsidy adjusted to bps must be rounded up"); let delta = total_high_bps_rewards as i64 - total_rewards as i64; @@ -305,15 +341,23 @@ mod tests { #[test] fn subsidy_by_month_table_test() { let cbm = create_legacy_manager(); - cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { + cbm.subsidy_by_month_table_before.iter().enumerate().for_each(|(i, x)| { assert_eq!(SUBSIDY_BY_MONTH_TABLE[i], *x, "for 1 BPS, const table and precomputed values must match"); }); for network_id in NetworkId::iter() { let cbm = create_manager(&network_id.into()); - cbm.subsidy_by_month_table.iter().enumerate().for_each(|(i, x)| { + cbm.subsidy_by_month_table_before.iter().enumerate().for_each(|(i, x)| { + assert_eq!( + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps().before()), + *x, + "{}: locally computed and precomputed values must match", + network_id + ); + }); + cbm.subsidy_by_month_table_after.iter().enumerate().for_each(|(i, x)| { assert_eq!( - SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps()), + SUBSIDY_BY_MONTH_TABLE[i].div_ceil(cbm.bps().after()), *x, "{}: locally computed and precomputed values must match", network_id @@ -332,10 +376,11 @@ mod tests { for network_id in NetworkId::iter() { let params = &network_id.into(); let cbm = create_manager(params); + let bps = params.bps().upper_bound(); - let pre_deflationary_phase_base_subsidy = 
PRE_DEFLATIONARY_PHASE_BASE_SUBSIDY / params.bps(); - let deflationary_phase_initial_subsidy = DEFLATIONARY_PHASE_INITIAL_SUBSIDY / params.bps(); - let blocks_per_halving = SECONDS_PER_HALVING * params.bps(); + let pre_deflationary_phase_base_subsidy = PRE_DEFLATIONARY_PHASE_BASE_SUBSIDY / bps; + let deflationary_phase_initial_subsidy = DEFLATIONARY_PHASE_INITIAL_SUBSIDY / bps; + let blocks_per_halving = SECONDS_PER_HALVING * bps; struct Test { name: &'static str, @@ -373,7 +418,7 @@ mod tests { Test { name: "after 32 halvings", daa_score: params.deflationary_phase_daa_score + 32 * blocks_per_halving, - expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(cbm.bps()), + expected: (DEFLATIONARY_PHASE_INITIAL_SUBSIDY / 2_u64.pow(32)).div_ceil(cbm.bps().before()), }, Test { name: "just before subsidy depleted", @@ -389,7 +434,7 @@ mod tests { for t in tests { assert_eq!(cbm.calc_block_subsidy(t.daa_score), t.expected, "{} test '{}' failed", network_id, t.name); - if params.bps() == 1 { + if bps == 1 { assert_eq!(cbm.legacy_calc_block_subsidy(t.daa_score), t.expected, "{} test '{}' failed", network_id, t.name); } } @@ -478,12 +523,12 @@ mod tests { params.max_coinbase_payload_len, params.deflationary_phase_daa_score, params.pre_deflationary_phase_base_subsidy, - params.target_time_per_block, + params.bps(), ) } /// Return a CoinbaseManager with legacy golang 1 BPS properties fn create_legacy_manager() -> CoinbaseManager { - CoinbaseManager::new(150, 204, 15778800 - 259200, 50000000000, 1000) + CoinbaseManager::new(150, 204, 15778800 - 259200, 50000000000, ForkedParam::new_const(1)) } } diff --git a/consensus/src/processes/difficulty.rs b/consensus/src/processes/difficulty.rs index a27da68a25..4bf7df18ba 100644 --- a/consensus/src/processes/difficulty.rs +++ b/consensus/src/processes/difficulty.rs @@ -4,16 +4,21 @@ use crate::model::stores::{ headers::HeaderStoreReader, }; use kaspa_consensus_core::{ - config::params::MIN_DIFFICULTY_WINDOW_LEN, + config::params::ForkActivation, errors::difficulty::{DifficultyError, DifficultyResult}, BlockHashSet, BlueWorkType, MAX_WORK_LEVEL, }; +use kaspa_core::warn; +use kaspa_hashes::Hash; use kaspa_math::{Uint256, Uint320}; use std::{ cmp::{max, Ordering}, iter::once_with, ops::Deref, - sync::Arc, + sync::{ + atomic::{AtomicU8, Ordering as AtomicOrdering}, + Arc, + }, }; use super::ghostdag::ordering::SortableBlock; @@ -63,12 +68,11 @@ trait DifficultyManagerExtension { } #[inline] - fn check_min_difficulty_window_len(difficulty_window_size: usize, min_difficulty_window_len: usize) { + fn check_min_difficulty_window_size(difficulty_window_size: usize, min_difficulty_window_size: usize) { assert!( - MIN_DIFFICULTY_WINDOW_LEN <= min_difficulty_window_len && min_difficulty_window_len <= difficulty_window_size, - "min_difficulty_window_len {} is expected to fit within {}..={}", - min_difficulty_window_len, - MIN_DIFFICULTY_WINDOW_LEN, + min_difficulty_window_size <= difficulty_window_size, + "min_difficulty_window_size {} is expected to be <= difficulty_window_size {}", + min_difficulty_window_size, difficulty_window_size ); } @@ -82,7 +86,7 @@ pub struct FullDifficultyManager { genesis_bits: u32, max_difficulty_target: Uint320, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, target_time_per_block: u64, } @@ -92,16 +96,16 @@ impl FullDifficultyManager { genesis_bits: u32, max_difficulty_target: Uint256, difficulty_window_size: usize, - min_difficulty_window_len: usize, + 
min_difficulty_window_size: usize, target_time_per_block: u64, ) -> Self { - Self::check_min_difficulty_window_len(difficulty_window_size, min_difficulty_window_len); + Self::check_min_difficulty_window_size(difficulty_window_size, min_difficulty_window_size); Self { headers_store, genesis_bits, max_difficulty_target: max_difficulty_target.into(), difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, target_time_per_block, } } @@ -132,7 +136,7 @@ impl FullDifficultyManager { let mut difficulty_blocks = self.get_difficulty_blocks(window); // Until there are enough blocks for a valid calculation the difficulty should remain constant. - if difficulty_blocks.len() < self.min_difficulty_window_len { + if difficulty_blocks.len() < self.min_difficulty_window_size { return self.genesis_bits; } @@ -164,38 +168,87 @@ impl DifficultyManagerExtension for FullDifficultyManager< } } +#[derive(Clone)] +struct CrescendoLogger { + steps: Arc, + activation: ForkActivation, +} + +impl CrescendoLogger { + fn new(activation: ForkActivation) -> Self { + Self { steps: Arc::new(AtomicU8::new(Self::ACTIVATE)), activation } + } + + const ACTIVATE: u8 = 0; + const DYNAMIC: u8 = 1; + const FULL: u8 = 2; + + pub fn report_activation_progress(&self, step: u8) -> bool { + if self.steps.compare_exchange(step, step + 1, AtomicOrdering::SeqCst, AtomicOrdering::SeqCst).is_ok() { + match step { + Self::ACTIVATE => { + // TODO (Crescendo): ascii art + warn!("--------- Crescendo hardfork was activated successfully!!! ---------"); + warn!("[Crescendo] Accelerating block rate 10 fold") + } + Self::DYNAMIC => {} + Self::FULL => {} + _ => {} + } + true + } else { + false + } + } +} + /// A difficulty manager implementing [KIP-0004](https://github.com/kaspanet/kips/blob/master/kip-0004.md), /// so based on sampled windows #[derive(Clone)] -pub struct SampledDifficultyManager { +pub struct SampledDifficultyManager { headers_store: Arc, + ghostdag_store: Arc, + genesis_hash: Hash, genesis_bits: u32, max_difficulty_target: Uint320, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, + prior_target_time_per_block: u64, target_time_per_block: u64, + crescendo_activation: ForkActivation, + crescendo_logger: CrescendoLogger, } -impl SampledDifficultyManager { +impl SampledDifficultyManager { + #[allow(clippy::too_many_arguments)] pub fn new( headers_store: Arc, + ghostdag_store: Arc, + genesis_hash: Hash, genesis_bits: u32, max_difficulty_target: Uint256, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, + prior_target_time_per_block: u64, target_time_per_block: u64, + crescendo_activation: ForkActivation, ) -> Self { - Self::check_min_difficulty_window_len(difficulty_window_size, min_difficulty_window_len); + Self::check_min_difficulty_window_size(difficulty_window_size, min_difficulty_window_size); Self { headers_store, + ghostdag_store, + genesis_hash, genesis_bits, max_difficulty_target: max_difficulty_target.into(), difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, difficulty_sample_rate, + prior_target_time_per_block, target_time_per_block, + crescendo_activation, + crescendo_logger: CrescendoLogger::new(crescendo_activation), } } @@ -230,14 +283,51 @@ impl SampledDifficultyManager { (self.internal_calc_daa_score(ghostdag_data, &mergeset_non_daa), mergeset_non_daa) } - pub fn calculate_difficulty_bits(&self, 
window: &BlockWindowHeap) -> u32 { + pub(crate) fn crescendo_activated(&self, selected_parent: Hash) -> bool { + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + self.crescendo_activation.is_active(sp_daa_score) + } + + pub fn calculate_difficulty_bits(&self, window: &BlockWindowHeap, ghostdag_data: &GhostdagData) -> u32 { // Note: this fn is duplicated (almost, see `* self.difficulty_sample_rate`) in Full and Sampled structs // so some alternate calculation can be investigated here. let mut difficulty_blocks = self.get_difficulty_blocks(window); // Until there are enough blocks for a valid calculation the difficulty should remain constant. - if difficulty_blocks.len() < self.min_difficulty_window_len { - return self.genesis_bits; + // + // [Crescendo]: post activation special case -- first activated blocks which do not have + // enough activated samples in their past + if difficulty_blocks.len() < self.min_difficulty_window_size { + let selected_parent = ghostdag_data.selected_parent; + if selected_parent == self.genesis_hash { + return self.genesis_bits; + } + + // We will use the selected parent as a source for the difficulty bits + let bits = self.headers_store.get_bits(selected_parent).unwrap(); + + // Check if the selected parent itself is already post crescendo activation (by checking the DAA score + // of its selected parent). We ruled out genesis, so we can safely assume the grandparent exists + if self.crescendo_activated(self.ghostdag_store.get_selected_parent(selected_parent).unwrap()) { + // In this case we simply take the selected parent bits as is + return bits; + } else { + // This indicates we are at the first blocks post activation (i.e., all parents were not activated). + // We use the selected parent target difficulty as baseline and scale it by the target_time_per_block ratio change + let target = Uint320::from(Uint256::from_compact_target_bits(bits)); + let scaled_target = target * self.prior_target_time_per_block / self.target_time_per_block; + let scaled_bits = Uint256::try_from(scaled_target.min(self.max_difficulty_target)).unwrap().compact_target_bits(); + + if self.crescendo_logger.report_activation_progress(CrescendoLogger::ACTIVATE) { + warn!( + "[Crescendo] Block target time change: {} -> {} milliseconds", + self.prior_target_time_per_block, self.target_time_per_block + ); + warn!("[Crescendo] Difficulty target change: {} -> {} ", target, scaled_target); + } + + return scaled_bits; + } } let (min_ts_index, max_ts_index) = difficulty_blocks.iter().position_minmax().into_option().unwrap(); @@ -256,6 +346,21 @@ impl SampledDifficultyManager { let measured_duration = max(max_ts - min_ts, 1); let expected_duration = self.target_time_per_block * self.difficulty_sample_rate * difficulty_blocks_len; // This does differ from FullDifficultyManager version let new_target = average_target * measured_duration / expected_duration; + + if difficulty_blocks_len + 1 < self.difficulty_window_size as u64 + && self.crescendo_logger.report_activation_progress(CrescendoLogger::DYNAMIC) + { + warn!( + "[Crescendo] Dynamic DAA reactivated, scaling the target by the measured/expected duration ratio: +\t\t\t {} -> {} (measured duration: {}, expected duration: {}, ratio {:.4})", + average_target, + new_target, + measured_duration, + expected_duration, + measured_duration as f64 / expected_duration as f64 + ); + } + Uint256::try_from(new_target.min(self.max_difficulty_target)).expect("max target < Uint256::MAX").compact_target_bits() } @@ -264,7 +369,7 @@ 
impl SampledDifficultyManager { } } -impl DifficultyManagerExtension for SampledDifficultyManager { +impl DifficultyManagerExtension for SampledDifficultyManager { fn headers_store(&self) -> &dyn HeaderStoreReader { self.headers_store.deref() } diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 1032868ee0..999c43de0e 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions, BlockHashes}, + config::params::ForkedParam, BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher, }; use kaspa_hashes::Hash; @@ -24,7 +25,7 @@ use super::ordering::*; #[derive(Clone)] pub struct GhostdagManager { genesis_hash: Hash, - pub(super) k: KType, + pub(super) k: ForkedParam, pub(super) ghostdag_store: Arc, pub(super) relations_store: S, pub(super) headers_store: Arc, @@ -43,7 +44,7 @@ pub struct GhostdagManager GhostdagManager { pub fn new( genesis_hash: Hash, - k: KType, + k: ForkedParam, ghostdag_store: Arc, relations_store: S, headers_store: Arc, @@ -65,7 +66,7 @@ impl Self { Self { genesis_hash, - k, + k: ForkedParam::new_const(k), ghostdag_store, relations_store, reachability_service, @@ -128,13 +129,20 @@ impl, candidate_blue_anticone_size: &mut KType, + k: KType, ) -> ColoringState { // If blue_candidate is in the future of chain_block, it means // that all remaining blues are in the past of chain_block and thus @@ -189,21 +192,28 @@ impl self.k { + if *candidate_blue_anticone_size > k { // k-cluster violation: The candidate's blue anticone exceeded k return ColoringState::Red; } - if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { + if peer_blue_anticone_size == k { // k-cluster violation: A block in candidate's blue anticone already // has k blue blocks in its own anticone return ColoringState::Red; @@ -211,7 +221,9 @@ impl= k } ColoringState::Pending @@ -236,14 +248,14 @@ impl ColoringOutput { + fn check_blue_candidate(&self, new_block_data: &GhostdagData, blue_candidate: Hash, k: KType) -> ColoringOutput { // The maximum length of new_block_data.mergeset_blues can be K+1 because // it contains the selected parent. - if new_block_data.mergeset_blues.len() as KType == self.k + 1 { + if new_block_data.mergeset_blues.len() as KType == k + 1 { return ColoringOutput::Red; } - let mut candidate_blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(self.k as usize); + let mut candidate_blues_anticone_sizes: BlockHashMap = BlockHashMap::with_capacity(k as usize); // Iterate over all blocks in the blue past of the new block that are not in the past // of blue_candidate, and check for each one of them if blue_candidate potentially // enlarges their blue anticone to be over K, or that they enlarge the blue anticone @@ -258,6 +270,7 @@ impl SampledPastMedianTimeManager { Self { headers_store, genesis_timestamp } } - pub fn calc_past_median_time(&self, window: &BlockWindowHeap) -> Result { + pub fn calc_past_median_time(&self, window: &BlockWindowHeap, selected_parent: Hash) -> Result { // The past median time is actually calculated taking the average of the 11 values closest to the center // of the sorted timestamps const AVERAGE_FRAME_SIZE: usize = 11; + /* + + [Crescendo]: In the first moments post activation the median time window will be empty or smaller than expected. + Which means that past median time will be closer to current time and less flexible. 
This is ok since + BBT makes sure to respect this lower bound. The following alternatives were considered and ruled out: + + 1. fill the window with non activated blocks as well, this means the sampled window will go 10x + time back (~45 minutes), so the timestamp for the first blocks post activation can go ~22 + minutes back (if abused). The result for DAA can be further temporary acceleration beyond + the new desired BPS (window duration will be much longer than expected hence difficulty will + go down further). + + 2. sampling the window before and after the activation with different corresponding sample rates. This approach + is ruled out due to complexity, and because the proposed (simpler) solution has no significant drawbacks. + + With the proposed solution, the worst case scenario can be forcing the last blocks pre-activation to a timestamp + which is timestamp_deviation_tolerance seconds in the future (~2 minutes), which will force the first blocks post + activation to this timestamp as well. However, this will only slightly smooth out the block rate transition. + */ + if window.is_empty() { - return Ok(self.genesis_timestamp); + // [Crescendo]: this indicates we are in the few seconds post activation where the window is + // still empty, simply take the selected parent timestamp + return Ok(self.headers_store.get_timestamp(selected_parent).unwrap()); } let mut window_timestamps: Vec = diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 5916df74d7..75d3426496 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -12,6 +12,7 @@ use crate::model::{ reachability::ReachabilityStoreReader, }, }; +use kaspa_consensus_core::{blockhash::BlockHashExtensions, config::params::ForkedParam}; use kaspa_hashes::Hash; use parking_lot::RwLock; @@ -23,8 +24,8 @@ pub struct PruningPointManager< V: PastPruningPointsStoreReader, W: HeadersSelectedTipStoreReader, > { - pruning_depth: u64, - finality_depth: u64, + pruning_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, reachability_service: MTReachabilityService, @@ -43,8 +44,8 @@ impl< > PruningPointManager { pub fn new( - pruning_depth: u64, - finality_depth: u64, + pruning_depth: ForkedParam, + finality_depth: ForkedParam, genesis_hash: Hash, reachability_service: MTReachabilityService, ghostdag_store: Arc, @@ -52,6 +53,11 @@ impl< past_pruning_points_store: Arc, header_selected_tip_store: Arc>, ) -> Self { + // [Crescendo]: These conditions ensure that blue score points with the same finality score before + // the fork will remain with the same finality score post the fork. See below for the usage. 
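Both depth parameters above are ForkedParam values, i.e. consensus parameters that switch from a pre-fork to a post-fork value at the Crescendo activation DAA score. What follows is only a minimal sketch of the interface this diff relies on (before(), after(), get(daa_score), upper_bound(), new_const()); the actual type imported from kaspa_consensus_core::config::params may be represented differently and carry more functionality.

// Illustrative sketch only: the real ForkedParam may differ in fields and methods.
#[derive(Clone, Copy)]
pub struct ForkedParam<T> {
    before: T,
    after: T,
    activation_daa_score: u64,
}

impl<T: Copy + Ord> ForkedParam<T> {
    /// A parameter that does not change across the fork (as used via new_const in the tests).
    pub fn new_const(v: T) -> Self {
        Self { before: v, after: v, activation_daa_score: u64::MAX }
    }
    pub fn before(&self) -> T { self.before }
    pub fn after(&self) -> T { self.after }
    /// Value as seen from a given DAA score (pre- or post-activation).
    pub fn get(&self, daa_score: u64) -> T {
        if daa_score >= self.activation_daa_score { self.after } else { self.before }
    }
    /// Conservative bound for call sites that need a single value regardless of the fork.
    pub fn upper_bound(&self) -> T { self.before.max(self.after) }
}

Under this reading, the assertions that follow compare the pre- and post-fork values directly, while call sites that have a selected-parent DAA score at hand use get().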
+ assert!(finality_depth.before() <= finality_depth.after()); + assert!(finality_depth.after() % finality_depth.before() == 0); + assert!(pruning_depth.before() <= pruning_depth.after()); Self { pruning_depth, finality_depth, @@ -67,47 +73,93 @@ impl< pub fn next_pruning_points_and_candidate_by_ghostdag_data( &self, ghostdag_data: CompactGhostdagData, - suggested_low_hash: Option, current_candidate: Hash, current_pruning_point: Hash, ) -> (Vec, Hash) { - let low_hash = match suggested_low_hash { - Some(suggested) => { - if !self.reachability_service.is_chain_ancestor_of(suggested, current_candidate) { - assert!(self.reachability_service.is_chain_ancestor_of(current_candidate, suggested)); - suggested - } else { - current_candidate - } - } - None => current_candidate, - }; + // Handle the edge case where sink is genesis + if ghostdag_data.selected_parent.is_origin() { + return (vec![], current_candidate); + } + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); + let finality_depth = self.finality_depth.get(selected_parent_daa_score); + self.next_pruning_points_and_candidate_by_ghostdag_data_inner( + ghostdag_data, + current_candidate, + current_pruning_point, + pruning_depth, + finality_depth, + ) + } + /// Returns the next pruning points and an updated pruning point candidate given the current + /// pruning point (P), a current candidate (C) and a target block B (represented by GD data). + /// + /// The pruning point candidate C is a moving block which usually has pruning depth from sink but + /// its finality score is still equal to P. It serves as an optimal starting point for searching + /// up rather than restarting the search from P each time. + /// + /// Assumptions: P ∈ chain(C), C ∈ chain(B), P and C have the same finality score + /// + /// Returns: new pruning points ordered from bottom up and an updated candidate + fn next_pruning_points_and_candidate_by_ghostdag_data_inner( + &self, + ghostdag_data: CompactGhostdagData, + current_candidate: Hash, + current_pruning_point: Hash, + pruning_depth: u64, + finality_depth: u64, + ) -> (Vec, Hash) { // If the pruning point is more out of date than that, an IBD with headers proof is needed anyway. - let mut new_pruning_points = Vec::with_capacity((self.pruning_depth / self.finality_depth) as usize); + let mut new_pruning_points = Vec::with_capacity((pruning_depth / finality_depth) as usize); let mut latest_pruning_point_bs = self.ghostdag_store.get_blue_score(current_pruning_point).unwrap(); - if latest_pruning_point_bs + self.pruning_depth > ghostdag_data.blue_score { + if latest_pruning_point_bs + pruning_depth > ghostdag_data.blue_score { // The pruning point is not in depth of self.pruning_depth, so there's // no point in checking if it is required to update it. This can happen - // because the virtual is not updated after IBD, so the pruning point + // because virtual is not immediately updated during IBD, so the pruning point + // might be in depth less than self.pruning_depth. 
return (vec![], current_candidate); } let mut new_candidate = current_candidate; - for selected_child in self.reachability_service.forward_chain_iterator(low_hash, ghostdag_data.selected_parent, true) { + /* + [Crescendo] + + Notation: + P = pruning point + C = candidate + F0 = the finality depth before the fork + F1 = the finality depth after the fork + + Property 1: F0 <= F1 AND F1 % F0 == 0 (validated in Self::new) + + Remark 1: if P,C had the same finality score with regard to F0, they have the same finality score also with regard to F1 + + Proof by picture (based on Property 1): + F0: [ 0 ] [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] ... [ 9 ] ... + F1: [ 0 ] [ 1 ] ... + + (each row divides the blue score space into finality score buckets with F0 or F1 numbers in each bucket correspondingly) + + This means we can safely begin the search from C even in the few moments post the fork (i.e., there's no fear of needing to "pull" C back) + + Note that overall this search is guaranteed to provide the desired monotonicity described in KIP-14: + https://github.com/kaspanet/kips/blob/master/kip-0014.md#pruning-point-adjustment + */ + for selected_child in self.reachability_service.forward_chain_iterator(current_candidate, ghostdag_data.selected_parent, true) + { let selected_child_bs = self.ghostdag_store.get_blue_score(selected_child).unwrap(); - if ghostdag_data.blue_score - selected_child_bs < self.pruning_depth { + if ghostdag_data.blue_score - selected_child_bs < pruning_depth { break; } new_candidate = selected_child; let new_candidate_bs = selected_child_bs; - if self.finality_score(new_candidate_bs) > self.finality_score(latest_pruning_point_bs) { + if self.finality_score(new_candidate_bs, finality_depth) > self.finality_score(latest_pruning_point_bs, finality_depth) { new_pruning_points.push(new_candidate); latest_pruning_point_bs = new_candidate_bs; } @@ -116,10 +168,32 @@ impl< (new_pruning_points, new_candidate) } - // finality_score is the number of finality intervals passed since - // the given block. - fn finality_score(&self, blue_score: u64) -> u64 { - blue_score / self.finality_depth + /// finality_score is the number of finality intervals which have passed since + /// genesis and up to the given blue_score. 
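The remark proved "by picture" above can also be checked with a couple of numbers. A tiny self-contained sketch, using hypothetical depths rather than the real network values: when F1 is a multiple of F0, every F0 bucket lies entirely inside a single F1 bucket, so two blue scores sharing an F0 bucket necessarily share an F1 bucket.

fn finality_score(blue_score: u64, finality_depth: u64) -> u64 {
    blue_score / finality_depth
}

fn main() {
    let (f0, f1) = (100u64, 400u64); // f1 % f0 == 0, mirroring the assertions in new()
    let (p, c) = (1_230u64, 1_299u64); // same F0 bucket: both map to 12
    assert_eq!(finality_score(p, f0), finality_score(c, f0));
    // Each F0 bucket is fully contained in one F1 bucket, so the scores agree for F1 too (both 3)
    assert_eq!(finality_score(p, f1), finality_score(c, f1));
}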
+ fn finality_score(&self, blue_score: u64, finality_depth: u64) -> u64 { + blue_score / finality_depth + } + + fn expected_header_pruning_point_inner( + &self, + ghostdag_data: CompactGhostdagData, + current_candidate: Hash, + current_pruning_point: Hash, + pruning_depth: u64, + finality_depth: u64, + ) -> Hash { + self.next_pruning_points_and_candidate_by_ghostdag_data_inner( + ghostdag_data, + current_candidate, + current_pruning_point, + pruning_depth, + finality_depth, + ) + .0 + .iter() + .last() + .copied() + .unwrap_or(current_pruning_point) } pub fn expected_header_pruning_point(&self, ghostdag_data: CompactGhostdagData, pruning_info: PruningPointInfo) -> Hash { @@ -127,10 +201,14 @@ impl< return self.genesis_hash; } + let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); + let pruning_depth = self.pruning_depth.get(selected_parent_daa_score); + let finality_depth = self.finality_depth.get(selected_parent_daa_score); + let (current_pruning_point, current_candidate, current_pruning_point_index) = pruning_info.decompose(); - let sp_header_pp = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; - let sp_header_pp_blue_score = self.headers_store.get_blue_score(sp_header_pp).unwrap(); + let sp_pp = self.headers_store.get_header(ghostdag_data.selected_parent).unwrap().pruning_point; + let sp_pp_blue_score = self.headers_store.get_blue_score(sp_pp).unwrap(); // If the block doesn't have the pruning in its selected chain we know for sure that it can't trigger a pruning point // change (we check the selected parent to take care of the case where the block is the virtual which doesn't have reachability data). @@ -140,35 +218,70 @@ impl< // Note: the pruning point from the POV of the current block is the first block in its chain that is in depth of self.pruning_depth and // its finality score is greater than the previous pruning point. This is why if the diff between finality_score(selected_parent.blue_score + 1) * finality_interval // and the current block blue score is less than self.pruning_depth we can know for sure that this block didn't trigger a pruning point change. 
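As a worked example of the bound computed next (hypothetical numbers, not network parameters): the earliest blue score at which a new pruning point could appear is the start of the next finality bucket above the selected parent's pruning point, and a block can only trigger the change once it is a further pruning_depth above that boundary.

fn main() {
    let finality_depth = 400u64;
    let pruning_depth = 1_000u64;
    let sp_pp_blue_score = 2_150u64; // selected parent's pruning point, finality bucket 5
    // The next finality bucket starts at blue score 2_400
    let min_required = (sp_pp_blue_score / finality_depth + 1) * finality_depth;
    assert_eq!(min_required, 2_400);
    // Only a block with blue score of at least 3_400 can trigger a pruning point change here
    assert!(min_required + pruning_depth <= 3_400);
}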
- let min_required_blue_score_for_next_pruning_point = (self.finality_score(sp_header_pp_blue_score) + 1) * self.finality_depth; + let min_required_blue_score_for_next_pruning_point = + (self.finality_score(sp_pp_blue_score, finality_depth) + 1) * finality_depth; let next_or_current_pp = if has_pruning_point_in_its_selected_chain - && min_required_blue_score_for_next_pruning_point + self.pruning_depth <= ghostdag_data.blue_score + && min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdag_data.blue_score { // If the selected parent pruning point is in the future of current global pruning point, then provide it as a suggestion - let suggested_low_hash = self - .reachability_service - .is_dag_ancestor_of_result(current_pruning_point, sp_header_pp) - .unwrap_option() - .and_then(|b| if b { Some(sp_header_pp) } else { None }); - let (new_pruning_points, _) = self.next_pruning_points_and_candidate_by_ghostdag_data( - ghostdag_data, - suggested_low_hash, - current_candidate, - current_pruning_point, - ); - - new_pruning_points.last().copied().unwrap_or(current_pruning_point) + let sp_pp_in_global_pp_future = + self.reachability_service.is_dag_ancestor_of_result(current_pruning_point, sp_pp).unwrap_option().is_some_and(|b| b); + + /* + Notation: + P = global pruning point + C = global candidate + B = current block (can be virtual) + S = B's selected parent + R = S's pruning point + F = the finality depth + */ + + let (pp, cc) = if sp_pp_in_global_pp_future { + if self.reachability_service.is_chain_ancestor_of(sp_pp, current_candidate) { + // R ∈ future(P), R ∈ chain(C): use R as pruning point and C as candidate + // There are two cases: (i) C is not deep enough from B, R will be returned + // (ii) C is deep enough and the search will start from it, possibly finding a new pruning point for B + (sp_pp, current_candidate) + } else { + // R ∈ future(P), R ∉ chain(C): Use R as candidate as well. 
+ // This might require a long walk up from R (bounded by F), however it is highly unlikely since it + // requires a ~pruning depth deep parallel chain + (sp_pp, sp_pp) + } + } else if self.reachability_service.is_chain_ancestor_of(current_candidate, ghostdag_data.selected_parent) { + // R ∉ future(P), P,C ∈ chain(B) + (current_pruning_point, current_candidate) + } else { + // R ∉ future(P), P ∈ chain(B), C ∉ chain(B) + (current_pruning_point, current_pruning_point) + }; + + self.expected_header_pruning_point_inner(ghostdag_data, cc, pp, pruning_depth, finality_depth) } else { - sp_header_pp + sp_pp }; - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, next_or_current_pp) { + // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new + // increased pruning depth, so we must manually verify not to go below it + if sp_pp_blue_score >= self.headers_store.get_blue_score(next_or_current_pp).unwrap() { + return sp_pp; + } + + if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, next_or_current_pp, pruning_depth) { return next_or_current_pp; } for i in (0..=current_pruning_point_index).rev() { let past_pp = self.past_pruning_points_store.get(i).unwrap(); - if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, past_pp) { + + // [Crescendo]: shortly after fork activation, R is not guaranteed to comply with the new + // increased pruning depth, so we must manually verify not to go below it + if sp_pp_blue_score >= self.headers_store.get_blue_score(past_pp).unwrap() { + return sp_pp; + } + + if self.is_pruning_point_in_pruning_depth(ghostdag_data.blue_score, past_pp, pruning_depth) { return past_pp; } } @@ -176,9 +289,9 @@ impl< self.genesis_hash } - fn is_pruning_point_in_pruning_depth(&self, pov_blue_score: u64, pruning_point: Hash) -> bool { + fn is_pruning_point_in_pruning_depth(&self, pov_blue_score: u64, pruning_point: Hash, pruning_depth: u64) -> bool { let pp_bs = self.headers_store.get_blue_score(pruning_point).unwrap(); - pov_blue_score >= pp_bs + self.pruning_depth + pov_blue_score >= pp_bs + pruning_depth } pub fn is_valid_pruning_point(&self, pp_candidate: Hash, hst: Hash) -> bool { @@ -190,7 +303,12 @@ impl< } let hst_bs = self.ghostdag_store.get_blue_score(hst).unwrap(); - self.is_pruning_point_in_pruning_depth(hst_bs, pp_candidate) + // [Crescendo]: for new nodes syncing right after the fork, it might be difficult to determine whether the + // full new pruning depth is expected, so we use the DAA score of the pruning point itself as an indicator. 
+ // This means that in the first few days following the fork we err on the side of a shorter period which is + // a weaker requirement + let pruning_depth = self.pruning_depth.get(self.headers_store.get_daa_score(pp_candidate).unwrap()); + self.is_pruning_point_in_pruning_depth(hst_bs, pp_candidate, pruning_depth) } pub fn are_pruning_points_in_valid_chain(&self, pruning_info: PruningPointInfo, hst: Hash) -> bool { @@ -257,8 +375,3 @@ impl< true } } - -#[cfg(test)] -mod tests { - // TODO: add unit-tests for next_pruning_point_and_candidate_by_block_hash and expected_header_pruning_point -} diff --git a/consensus/src/processes/pruning_proof/build.rs b/consensus/src/processes/pruning_proof/build.rs index 664eb5981b..b10d86f134 100644 --- a/consensus/src/processes/pruning_proof/build.rs +++ b/consensus/src/processes/pruning_proof/build.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ blockhash::{BlockHashExtensions, BlockHashes}, header::Header, pruning::PruningPointProof, - BlockHashSet, BlockLevel, HashMapCustomHasher, + BlockHashSet, BlockLevel, HashMapCustomHasher, KType, }; use kaspa_core::debug; use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions, DB}; @@ -285,6 +285,7 @@ impl PruningProofManager { &ghostdag_store, Some(block_at_depth_m_at_next_level), level, + self.ghostdag_k.get(pp_header.header.daa_score), ); // Step 4 - Check if we actually have enough depth. @@ -325,6 +326,7 @@ impl PruningProofManager { ghostdag_store: &Arc, required_block: Option, level: BlockLevel, + ghostdag_k: KType, ) -> bool { let relations_service = RelationsStoreInFutureOfRoot { relations_store: self.level_relations_services[level as usize].clone(), @@ -333,7 +335,7 @@ impl PruningProofManager { }; let gd_manager = GhostdagManager::with_level( root, - self.ghostdag_k, + ghostdag_k, ghostdag_store.clone(), relations_service.clone(), self.headers_store.clone(), diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs index a9412bbf60..728f8eca8e 100644 --- a/consensus/src/processes/pruning_proof/mod.rs +++ b/consensus/src/processes/pruning_proof/mod.rs @@ -16,6 +16,7 @@ use rocksdb::WriteBatch; use kaspa_consensus_core::{ blockhash::{self, BlockHashExtensions}, + config::params::ForkedParam, errors::consensus::{ConsensusError, ConsensusResult}, header::Header, pruning::{PruningPointProof, PruningPointTrustedData}, @@ -121,8 +122,8 @@ pub struct PruningProofManager { max_block_level: BlockLevel, genesis_hash: Hash, pruning_proof_m: u64, - anticone_finalization_depth: u64, - ghostdag_k: KType, + anticone_finalization_depth: ForkedParam, + ghostdag_k: ForkedParam, is_consensus_exiting: Arc, } @@ -140,8 +141,8 @@ impl PruningProofManager { max_block_level: BlockLevel, genesis_hash: Hash, pruning_proof_m: u64, - anticone_finalization_depth: u64, - ghostdag_k: KType, + anticone_finalization_depth: ForkedParam, + ghostdag_k: ForkedParam, is_consensus_exiting: Arc, ) -> Self { Self { @@ -244,10 +245,10 @@ impl PruningProofManager { /// the search is halted and a partial chain is returned. 
/// /// The returned hashes are guaranteed to have GHOSTDAG data - pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash) -> Vec { - let mut hashes = Vec::with_capacity(self.ghostdag_k as usize + 1); + pub(crate) fn get_ghostdag_chain_k_depth(&self, hash: Hash, ghostdag_k: KType) -> Vec { + let mut hashes = Vec::with_capacity(ghostdag_k as usize + 1); let mut current = hash; - for _ in 0..=self.ghostdag_k { + for _ in 0..=ghostdag_k { hashes.push(current); let Some(parent) = self.ghostdag_store.get_selected_parent(current).unwrap_option() else { break; @@ -275,6 +276,10 @@ impl PruningProofManager { let mut daa_window_blocks = BlockHashMap::new(); let mut ghostdag_blocks = BlockHashMap::new(); + // [Crescendo]: get ghostdag k based on the pruning point's DAA score. The off-by-one of not going by selected parent + // DAA score is not important here as we simply increase K one block earlier which is more conservative (saving/sending more data) + let ghostdag_k = self.ghostdag_k.get(self.headers_store.get_daa_score(pruning_point).unwrap()); + // PRUNE SAFETY: called either via consensus under the prune guard or by the pruning processor (hence no pruning in parallel) for anticone_block in anticone.iter().copied() { @@ -291,7 +296,7 @@ impl PruningProofManager { } } - let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block); + let ghostdag_chain = self.get_ghostdag_chain_k_depth(anticone_block, ghostdag_k); for hash in ghostdag_chain { if let Entry::Vacant(e) = ghostdag_blocks.entry(hash) { let ghostdag = self.ghostdag_store.get_data(hash).unwrap(); @@ -369,8 +374,12 @@ impl PruningProofManager { let virtual_state = self.virtual_stores.read().state.get().unwrap(); let pp_bs = self.headers_store.get_blue_score(pp).unwrap(); + // [Crescendo]: use pruning point DAA score for activation. 
This means that only after sufficient time + // post activation we will require the increased finalization depth + let pruning_point_daa_score = self.headers_store.get_daa_score(pp).unwrap(); + // The anticone is considered final only if the pruning point is at sufficient depth from virtual - if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth { + if virtual_state.ghostdag_data.blue_score >= pp_bs + self.anticone_finalization_depth.get(pruning_point_daa_score) { let anticone = Arc::new(self.calculate_pruning_point_anticone_and_trusted_data(pp, virtual_state.parents.iter().copied())); cache_lock.replace(CachedPruningPointData { pruning_point: pp, data: anticone.clone() }); Ok(anticone) diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs index 3262b65901..8e5c5ef23a 100644 --- a/consensus/src/processes/pruning_proof/validate.rs +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -173,6 +173,10 @@ impl PruningProofManager { return Err(PruningImportError::PruningProofNotEnoughHeaders); } + // [Crescendo]: decide on ghostdag K based on proof pruning point DAA score + let proof_pp_daa_score = proof[0].last().expect("checked if empty").daa_score; + let ghostdag_k = self.ghostdag_k.get(proof_pp_daa_score); + let headers_estimate = self.estimate_proof_unique_size(proof); let (db_lifetime, db) = kaspa_database::create_temp_db!(ConnBuilder::default().with_files_limit(10)); @@ -199,7 +203,7 @@ impl PruningProofManager { .map(|(level, ghostdag_store)| { GhostdagManager::with_level( self.genesis_hash, - self.ghostdag_k, + ghostdag_k, ghostdag_store, relations_stores[level].clone(), headers_store.clone(), diff --git a/consensus/src/processes/sync/mod.rs b/consensus/src/processes/sync/mod.rs index 839e48a9ef..4b39216c01 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -1,7 +1,10 @@ use std::{cmp::min, ops::Deref, sync::Arc}; use itertools::Itertools; -use kaspa_consensus_core::errors::sync::{SyncManagerError, SyncManagerResult}; +use kaspa_consensus_core::{ + config::params::ForkedParam, + errors::sync::{SyncManagerError, SyncManagerResult}, +}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_math::uint::malachite_base::num::arithmetic::traits::CeilingLogBase2; @@ -28,7 +31,7 @@ pub struct SyncManager< X: PruningStoreReader, Y: StatusesStoreReader, > { - mergeset_size_limit: usize, + mergeset_size_limit: ForkedParam, reachability_service: MTReachabilityService, traversal_manager: DagTraversalManager, ghostdag_store: Arc, @@ -49,7 +52,7 @@ impl< > SyncManager { pub fn new( - mergeset_size_limit: usize, + mergeset_size_limit: ForkedParam, reachability_service: MTReachabilityService, traversal_manager: DagTraversalManager, ghostdag_store: Arc, @@ -75,7 +78,7 @@ impl< /// because it returns blocks with MergeSet granularity, so if MergeSet > max_blocks, the function will return nothing which is undesired behavior. pub fn antipast_hashes_between(&self, low: Hash, high: Hash, max_blocks: Option) -> (Vec, Hash) { let max_blocks = max_blocks.unwrap_or(usize::MAX); - assert!(max_blocks >= self.mergeset_size_limit); + assert!(max_blocks >= self.mergeset_size_limit.upper_bound() as usize); // If low is not in the chain of high - forward_chain_iterator will fail. 
// Therefore, we traverse down low's chain until we reach a block that is in diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 519a196f82..0b1f0aa004 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -4,8 +4,6 @@ pub mod tx_validation_in_isolation; pub mod tx_validation_in_utxo_context; use std::sync::Arc; -use crate::model::stores::ghostdag; - use kaspa_txscript::{ caches::{Cache, TxScriptCacheCounters}, SigCacheKey, @@ -19,52 +17,38 @@ pub struct TransactionValidator { max_tx_outputs: usize, max_signature_script_len: usize, max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, sig_cache: Cache, pub(crate) mass_calculator: MassCalculator, - /// Storage mass hardfork DAA score - storage_mass_activation: ForkActivation, - /// KIP-10 hardfork DAA score - kip10_activation: ForkActivation, - payload_activation: ForkActivation, - runtime_sig_op_counting: ForkActivation, + /// Crescendo hardfork activation score. Activates KIPs 9, 10, 14 + crescendo_activation: ForkActivation, } impl TransactionValidator { - #[allow(clippy::too_many_arguments)] pub fn new( max_tx_inputs: usize, max_tx_outputs: usize, max_signature_script_len: usize, max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, counters: Arc, mass_calculator: MassCalculator, - storage_mass_activation: ForkActivation, - kip10_activation: ForkActivation, - payload_activation: ForkActivation, - runtime_sig_op_counting: ForkActivation, + crescendo_activation: ForkActivation, ) -> Self { Self { max_tx_inputs, max_tx_outputs, max_signature_script_len, max_script_public_key_len, - ghostdag_k, coinbase_payload_script_public_key_max_len, coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator, - storage_mass_activation, - kip10_activation, - payload_activation, - runtime_sig_op_counting, + crescendo_activation, } } @@ -73,7 +57,6 @@ impl TransactionValidator { max_tx_outputs: usize, max_signature_script_len: usize, max_script_public_key_len: usize, - ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, counters: Arc, @@ -83,15 +66,11 @@ impl TransactionValidator { max_tx_outputs, max_signature_script_len, max_script_public_key_len, - ghostdag_k, coinbase_payload_script_public_key_max_len, coinbase_maturity, sig_cache: Cache::with_counters(10_000, counters), mass_calculator: MassCalculator::new(0, 0, 0, 0), - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo_activation: ForkActivation::never(), } } } diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs index 129627c59d..5fe2b383af 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_header_context.rs @@ -90,7 +90,7 @@ impl TransactionValidator { fn check_transaction_payload(&self, tx: &Transaction, ctx_daa_score: u64) -> TxResult<()> { // TODO (post HF): move back to in isolation validation - if 
self.payload_activation.is_active(ctx_daa_score) { + if self.crescendo_activation.is_active(ctx_daa_score) { Ok(()) } else { if !tx.is_coinbase() && !tx.payload.is_empty() { diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index b509a71c72..224d0ce72c 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -35,17 +35,23 @@ impl TransactionValidator { self.check_transaction_script_public_keys(tx) } - fn check_coinbase_in_isolation(&self, tx: &kaspa_consensus_core::tx::Transaction) -> TxResult<()> { + fn check_coinbase_in_isolation(&self, tx: &Transaction) -> TxResult<()> { if !tx.is_coinbase() { return Ok(()); } if !tx.inputs.is_empty() { return Err(TxRuleError::CoinbaseHasInputs(tx.inputs.len())); } + + /* + [Crescendo]: moved this specific check to body_validation_in_context since it depends on fork activation + TODO (post HF): move back here + let outputs_limit = self.ghostdag_k as u64 + 2; if tx.outputs.len() as u64 > outputs_limit { return Err(TxRuleError::CoinbaseTooManyOutputs(tx.outputs.len(), outputs_limit)); } + */ for (i, output) in tx.outputs.iter().enumerate() { if output.script_public_key.script().len() > self.coinbase_payload_script_public_key_max_len as usize { return Err(TxRuleError::CoinbaseScriptPublicKeyTooLong(i)); @@ -175,7 +181,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs index 5c74bf07b5..9a7df9fd04 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_utxo_context.rs @@ -3,7 +3,6 @@ use kaspa_consensus_core::{ hashing::sighash::{SigHashReusedValuesSync, SigHashReusedValuesUnsync}, tx::{TransactionInput, VerifiableTransaction}, }; -use kaspa_core::warn; use kaspa_txscript::{caches::Cache, get_sig_op_count_upper_bound, SigCacheKey, TxScriptEngine}; use kaspa_txscript_errors::TxScriptError; use rayon::iter::{IntoParallelIterator, ParallelIterator}; @@ -43,13 +42,14 @@ impl TransactionValidator { let total_in = self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; let fee = total_in - total_out; - if flags != TxValidationFlags::SkipMassCheck && self.storage_mass_activation.is_active(pov_daa_score) { + if flags != TxValidationFlags::SkipMassCheck && self.crescendo_activation.is_active(pov_daa_score) { // Storage mass hardfork was activated self.check_mass_commitment(tx)?; - if self.storage_mass_activation.is_within_range_from_activation(pov_daa_score, 10) { - warn!("--------- Storage mass hardfork was activated successfully!!! --------- (DAA score: {})", pov_daa_score); - } + // TODO (crescendo): log in one central location + // if self.storage_mass_activation.is_within_range_from_activation(pov_daa_score, 10) { + // kaspa_core::warn!("--------- Storage mass hardfork was activated successfully!!! 
--------- (DAA score: {})", pov_daa_score); + // } } Self::check_sequence_lock(tx, pov_daa_score)?; @@ -59,7 +59,7 @@ impl TransactionValidator { match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { - if !self.runtime_sig_op_counting.is_active(pov_daa_score) { + if !self.crescendo_activation.is_active(pov_daa_score) { Self::check_sig_op_counts(tx)?; } self.check_scripts(tx, pov_daa_score)?; @@ -127,7 +127,8 @@ impl TransactionValidator { } fn check_mass_commitment(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let calculated_contextual_mass = self.mass_calculator.calc_tx_overall_mass(tx, None).ok_or(TxRuleError::MassIncomputable)?; + let calculated_contextual_mass = + self.mass_calculator.calc_contextual_masses(tx).ok_or(TxRuleError::MassIncomputable)?.storage_mass; let committed_contextual_mass = tx.tx().mass(); if committed_contextual_mass != calculated_contextual_mass { return Err(TxRuleError::WrongMass(calculated_contextual_mass, committed_contextual_mass)); @@ -177,8 +178,8 @@ impl TransactionValidator { check_scripts( &self.sig_cache, tx, - self.kip10_activation.is_active(pov_daa_score), - self.runtime_sig_op_counting.is_active(pov_daa_score), + self.crescendo_activation.is_active(pov_daa_score), + self.crescendo_activation.is_active(pov_daa_score), ) } } @@ -280,7 +281,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -349,7 +349,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -422,7 +421,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -492,7 +490,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -562,7 +559,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -632,7 +628,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -701,7 +696,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), @@ -764,7 +758,6 @@ mod tests { params.max_tx_outputs, params.max_signature_script_len, params.max_script_public_key_len, - params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, Default::default(), diff --git a/consensus/src/processes/window.rs b/consensus/src/processes/window.rs index 1caff9c007..9574a4eeb2 100644 --- a/consensus/src/processes/window.rs +++ b/consensus/src/processes/window.rs @@ -112,7 +112,7 @@ impl Self { let difficulty_manager = FullDifficultyManager::new( @@ -120,7 +120,7 @@ impl, 
block_window_cache_for_past_median_time: Arc, target_time_per_block: u64, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, past_median_time_sample_rate: u64, - difficulty_manager: SampledDifficultyManager, + difficulty_manager: SampledDifficultyManager, past_median_time_manager: SampledPastMedianTimeManager, } @@ -326,22 +326,27 @@ impl, block_window_cache_for_past_median_time: Arc, max_difficulty_target: Uint256, + prior_target_time_per_block: u64, target_time_per_block: u64, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, past_median_time_window_size: usize, past_median_time_sample_rate: u64, ) -> Self { let difficulty_manager = SampledDifficultyManager::new( headers_store.clone(), + ghostdag_store.clone(), + genesis.hash, genesis.bits, max_difficulty_target, difficulty_window_size, - min_difficulty_window_len, + min_difficulty_window_size, difficulty_sample_rate, + prior_target_time_per_block, target_time_per_block, + crescendo_activation, ); let past_median_time_manager = SampledPastMedianTimeManager::new(headers_store.clone(), genesis.timestamp); Self { @@ -352,7 +357,7 @@ impl bool { + let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); + self.crescendo_activation.is_active(sp_daa_score) + } + fn build_block_window( &self, ghostdag_data: &GhostdagData, @@ -384,6 +394,28 @@ impl Some(&self.block_window_cache_for_difficulty), WindowType::MedianTimeWindow => Some(&self.block_window_cache_for_past_median_time), @@ -404,7 +436,10 @@ impl); - + self.push_mergeset( + &mut &mut window_heap, + sample_rate, + ¤t_ghostdag, + parent_ghostdag.blue_work, + None::, + filter_non_activated, + ); + + // [Crescendo]: the chain ancestor window will be in the cache only if it was + // activated (due to tracking of window origin), so we can safely inherit it + // // see if we can inherit and merge with the selected parent cache if self.try_merge_with_selected_parent_cache(&mut window_heap, &cache, ¤t_ghostdag.selected_parent) { // if successful, we may break out of the loop, with the window already filled. @@ -469,10 +522,11 @@ impl, + filter_non_activated: bool, ) { if let Some(mut mergeset_non_daa_inserter) = mergeset_non_daa_inserter { - // If we have a non-daa inserter, we most iterate over the whole mergeset and op the sampled and non-daa blocks. - for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work) { + // If we have a non-daa inserter, we must iterate over the whole mergeset and operate on the sampled and non-daa blocks. 
+ for block in self.sampled_mergeset_iterator(sample_rate, ghostdag_data, selected_parent_blue_work, filter_non_activated) { match block { SampledBlock::Sampled(block) => { heap.try_push(block.hash, block.blue_work); @@ -482,7 +536,7 @@ impl, + filter_non_activated: bool, ) -> Option> { cache.get(&ghostdag_data.selected_parent).map(|selected_parent_window| { let mut heap = Lazy::new(|| BoundedSizeBlockHeap::from_binary_heap(window_size, (*selected_parent_window).clone())); // We pass a Lazy heap as an optimization to avoid cloning the selected parent heap in cases where the mergeset contains no samples - self.push_mergeset(&mut heap, sample_rate, ghostdag_data, selected_parent_blue_work, mergeset_non_daa_inserter); + self.push_mergeset( + &mut heap, + sample_rate, + ghostdag_data, + selected_parent_blue_work, + mergeset_non_daa_inserter, + filter_non_activated, + ); if let Ok(heap) = Lazy::into_value(heap) { Arc::new(heap.binary_heap) } else { @@ -532,6 +594,7 @@ impl impl Iterator + 'a { let selected_parent_block = SortableBlock::new(ghostdag_data.selected_parent, selected_parent_blue_work); let selected_parent_daa_score = self.headers_store.get_daa_score(ghostdag_data.selected_parent).unwrap(); @@ -541,10 +604,14 @@ impl u32 { - self.difficulty_manager.calculate_difficulty_bits(&daa_window.window) + fn calculate_difficulty_bits(&self, ghostdag_data: &GhostdagData, daa_window: &DaaWindow) -> u32 { + self.difficulty_manager.calculate_difficulty_bits(&daa_window.window, ghostdag_data) } fn calc_past_median_time(&self, ghostdag_data: &GhostdagData) -> Result<(u64, Arc), RuleError> { let window = self.block_window(ghostdag_data, WindowType::MedianTimeWindow)?; - let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + let past_median_time = self.past_median_time_manager.calc_past_median_time(&window, ghostdag_data.selected_parent)?; Ok((past_median_time, window)) } fn calc_past_median_time_for_known_hash(&self, hash: Hash) -> Result { if let Some(window) = self.block_window_cache_for_past_median_time.get(&hash, WindowOrigin::Sampled) { - let past_median_time = self.past_median_time_manager.calc_past_median_time(&window)?; + let past_median_time = self + .past_median_time_manager + .calc_past_median_time(&window, self.ghostdag_store.get_selected_parent(hash).unwrap())?; Ok(past_median_time) } else { let ghostdag_data = self.ghostdag_store.get_data(hash).unwrap(); @@ -664,7 +733,7 @@ pub struct DualWindowManager< > { ghostdag_store: Arc, headers_store: Arc, - sampling_activation: ForkActivation, + crescendo_activation: ForkActivation, full_window_manager: FullWindowManager, sampled_window_manager: SampledWindowManager, } @@ -681,11 +750,12 @@ impl, block_window_cache_for_past_median_time: Arc, max_difficulty_target: Uint256, - target_time_per_block: u64, - sampling_activation: ForkActivation, + prior_target_time_per_block: u64, + crescendo_target_time_per_block: u64, + crescendo_activation: ForkActivation, full_difficulty_window_size: usize, sampled_difficulty_window_size: usize, - min_difficulty_window_len: usize, + min_difficulty_window_size: usize, difficulty_sample_rate: u64, full_past_median_time_window_size: usize, sampled_past_median_time_window_size: usize, @@ -698,9 +768,9 @@ impl bool { let sp_daa_score = self.headers_store.get_daa_score(selected_parent).unwrap(); - self.sampling_activation.is_active(sp_daa_score) + self.crescendo_activation.is_active(sp_daa_score) } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index db9f32c165..0a1e7943ef 
100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -527,15 +527,20 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let (address_manager, port_mapping_extender_svc) = AddressManager::new(config.clone(), meta_db, tick_service.clone()); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_extended_config( - config.target_time_per_block, + config.target_time_per_block(), false, config.max_block_mass, config.ram_scale, config.block_template_cache_lifetime, mining_counters.clone(), ))); - let mining_monitor = - Arc::new(MiningMonitor::new(mining_manager.clone(), mining_counters, tx_script_cache_counters.clone(), tick_service.clone())); + let mining_monitor = Arc::new(MiningMonitor::new( + mining_manager.clone(), + consensus_manager.clone(), + mining_counters, + tx_script_cache_counters.clone(), + tick_service.clone(), + )); let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 12416be678..42dd566d70 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -106,11 +106,14 @@ pub enum NonStandardError { #[error("transaction version {1} is not in the valid range of {2}-{3}")] RejectVersion(TransactionId, u16, u16, u16), - #[error("transaction mass of {1} is larger than max allowed size of {2}")] - RejectMass(TransactionId, u64, u64), + #[error("transaction compute mass of {1} is larger than max allowed size of {2}")] + RejectComputeMass(TransactionId, u64, u64), - #[error("transaction mass in context (including storage mass) of {1} is larger than max allowed size of {2}")] - RejectContextualMass(TransactionId, u64, u64), + #[error("transaction transient (storage) mass of {1} is larger than max allowed size of {2}")] + RejectTransientMass(TransactionId, u64, u64), + + #[error("transaction storage mass of {1} is larger than max allowed size of {2}")] + RejectStorageMass(TransactionId, u64, u64), #[error("transaction input #{1}: signature script size of {2} bytes is larger than the maximum allowed size of {3} bytes")] RejectSignatureScriptSize(TransactionId, usize, u64, u64), @@ -138,8 +141,9 @@ impl NonStandardError { pub fn transaction_id(&self) -> &TransactionId { match self { NonStandardError::RejectVersion(id, _, _, _) => id, - NonStandardError::RejectMass(id, _, _) => id, - NonStandardError::RejectContextualMass(id, _, _) => id, + NonStandardError::RejectComputeMass(id, _, _) => id, + NonStandardError::RejectTransientMass(id, _, _) => id, + NonStandardError::RejectStorageMass(id, _, _) => id, NonStandardError::RejectSignatureScriptSize(id, _, _, _) => id, NonStandardError::RejectScriptPublicKeyVersion(id, _) => id, NonStandardError::RejectOutputScriptClass(id, _) => id, diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 32893312a1..c3d9df4dae 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -28,6 +28,7 @@ use kaspa_consensus_core::{ }, block::{BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, + config::params::ForkedParam, errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; @@ -46,6 +47,7 @@ pub struct MiningManager { } impl MiningManager { + // [Crescendo]: used for tests only so we can pass a single value target_time_per_block pub fn new( target_time_per_block: u64, relay_non_std_transactions: bool, @@ -53,12 +55,12 @@ impl MiningManager { 
cache_lifetime: Option, counters: Arc, ) -> Self { - let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass); + let config = Config::build_default(ForkedParam::new_const(target_time_per_block), relay_non_std_transactions, max_block_mass); Self::with_config(config, cache_lifetime, counters) } pub fn new_with_extended_config( - target_time_per_block: u64, + target_time_per_block: ForkedParam, relay_non_std_transactions: bool, max_block_mass: u64, ram_scale: f64, @@ -203,8 +205,11 @@ impl MiningManager { } /// Returns realtime feerate estimations based on internal mempool state - pub(crate) fn get_realtime_feerate_estimations(&self) -> FeerateEstimations { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.maximum_mass_per_block); + pub(crate) fn get_realtime_feerate_estimations(&self, virtual_daa_score: u64) -> FeerateEstimations { + let args = FeerateEstimatorArgs::new( + self.config.network_blocks_per_second.get(virtual_daa_score), + self.config.maximum_mass_per_block, + ); let estimator = self.mempool.read().build_feerate_estimator(args); estimator.calc_estimations(self.config.minimum_feerate()) } @@ -215,7 +220,10 @@ impl MiningManager { consensus: &dyn ConsensusApi, prefix: kaspa_addresses::Prefix, ) -> MiningManagerResult { - let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.maximum_mass_per_block); + let args = FeerateEstimatorArgs::new( + self.config.network_blocks_per_second.get(consensus.get_virtual_daa_score()), + self.config.maximum_mass_per_block, + ); let network_mass_per_second = args.network_mass_per_second(); let mempool_read = self.mempool.read(); let estimator = mempool_read.build_feerate_estimator(args); @@ -516,7 +524,7 @@ impl MiningManager { transactions[lower_bound..] 
.iter() .position(|tx| { - mass += tx.calculated_compute_mass.unwrap(); + mass += tx.calculated_non_contextual_masses.unwrap().max(); mass >= self.config.maximum_mass_per_block }) // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, @@ -854,8 +862,8 @@ impl MiningManagerProxy { } /// Returns realtime feerate estimations based on internal mempool state - pub async fn get_realtime_feerate_estimations(self) -> FeerateEstimations { - spawn_blocking(move || self.inner.get_realtime_feerate_estimations()).await.unwrap() + pub async fn get_realtime_feerate_estimations(self, virtual_daa_score: u64) -> FeerateEstimations { + spawn_blocking(move || self.inner.get_realtime_feerate_estimations(virtual_daa_score)).await.unwrap() } /// Returns realtime feerate estimations based on internal mempool state with additional verbose data diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 6ddc86e45b..25ae76ec8c 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -20,9 +20,10 @@ mod tests { api::ConsensusApi, block::TemplateBuildMode, coinbase::MinerData, + config::params::ForkedParam, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, errors::tx::TxRuleError, - mass::transaction_estimated_serialized_size, + mass::{transaction_estimated_serialized_size, NonContextualMasses}, subnets::SUBNETWORK_ID_NATIVE, tx::{ scriptvec, MutableTransaction, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, @@ -109,12 +110,12 @@ mod tests { tx.calculated_fee.unwrap() ); assert_eq!( - tx_to_insert.calculated_compute_mass.unwrap(), - tx.calculated_compute_mass.unwrap(), + tx_to_insert.calculated_non_contextual_masses.unwrap(), + tx.calculated_non_contextual_masses.unwrap(), "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong mass in transaction {}: expected: {}, got: {}", tx.id(), - tx_to_insert.calculated_compute_mass.unwrap(), - tx.calculated_compute_mass.unwrap() + tx_to_insert.calculated_non_contextual_masses.unwrap(), + tx.calculated_non_contextual_masses.unwrap() ); } assert!( @@ -901,7 +902,7 @@ mod tests { ]; let consensus = Arc::new(ConsensusMock::new()); - let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); + let mut config = Config::build_default(ForkedParam::new_const(TARGET_TIME_PER_BLOCK), false, MAX_BLOCK_MASS); // Limit the orphan pool to 2 transactions config.maximum_orphan_transaction_count = 2; let counters = Arc::new(MiningCounters::default()); @@ -1129,7 +1130,7 @@ mod tests { let consensus = Arc::new(ConsensusMock::new()); let counters = Arc::new(MiningCounters::default()); - let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); + let mut config = Config::build_default(ForkedParam::new_const(TARGET_TIME_PER_BLOCK), false, MAX_BLOCK_MASS); let tx_size = txs[0].mempool_estimated_bytes(); let size_limit = TX_COUNT * tx_size; config.mempool_size_limit = size_limit; @@ -1348,7 +1349,9 @@ mod tests { let mut mutable_tx = MutableTransaction::from_tx(transaction); mutable_tx.calculated_fee = Some(DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE); // Please note: this is the ConsensusMock version of the calculated_mass which differs from Consensus - mutable_tx.calculated_compute_mass = Some(transaction_estimated_serialized_size(&mutable_tx.tx)); + let transaction_serialized_size = transaction_estimated_serialized_size(&mutable_tx.tx); + mutable_tx.calculated_non_contextual_masses = + 
Some(NonContextualMasses::new(transaction_serialized_size, transaction_serialized_size)); mutable_tx.entries[0] = Some(entry); mutable_tx diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index bf4e0cfb7f..2f9ff8a25d 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -2,12 +2,12 @@ use crate::mempool::{ errors::{NonStandardError, NonStandardResult}, Mempool, }; -use kaspa_consensus_core::hashing::sighash::SigHashReusedValuesUnsync; use kaspa_consensus_core::{ constants::{MAX_SCRIPT_PUBLIC_KEY_VERSION, MAX_SOMPI}, mass, tx::{MutableTransaction, PopulatedTransaction, TransactionOutput}, }; +use kaspa_consensus_core::{hashing::sighash::SigHashReusedValuesUnsync, mass::NonContextualMasses}; use kaspa_txscript::{get_sig_op_count_upper_bound, is_unspendable, script_class::ScriptClass}; /// MAX_STANDARD_P2SH_SIG_OPS is the maximum number of signature operations @@ -61,12 +61,12 @@ impl Mempool { // almost as much to process as the sender fees, limit the maximum // size of a transaction. This also helps mitigate CPU exhaustion // attacks. - if transaction.calculated_compute_mass.unwrap() > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectMass( - transaction_id, - transaction.calculated_compute_mass.unwrap(), - MAXIMUM_STANDARD_TRANSACTION_MASS, - )); + let NonContextualMasses { compute_mass, transient_mass } = transaction.calculated_non_contextual_masses.unwrap(); + if compute_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + return Err(NonStandardError::RejectComputeMass(transaction_id, compute_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); + } + if transient_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + return Err(NonStandardError::RejectTransientMass(transaction_id, transient_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } for (i, input) in transaction.tx.inputs.iter().enumerate() { @@ -172,9 +172,8 @@ impl Mempool { pub(crate) fn check_transaction_standard_in_context(&self, transaction: &MutableTransaction) -> NonStandardResult<()> { let transaction_id = transaction.id(); let contextual_mass = transaction.tx.mass(); - assert!(contextual_mass > 0, "expected to be set by consensus"); if contextual_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { - return Err(NonStandardError::RejectContextualMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); + return Err(NonStandardError::RejectStorageMass(transaction_id, contextual_mass, MAXIMUM_STANDARD_TRANSACTION_MASS)); } for (i, input) in transaction.tx.inputs.iter().enumerate() { // It is safe to elide existence and index checks here since @@ -199,8 +198,10 @@ impl Mempool { } } - // TODO: For now, until wallets adapt, we don't require fee as function of full contextual_mass (but the fee/mass ratio will affect tx selection to block template) - let minimum_fee = self.minimum_required_transaction_relay_fee(transaction.calculated_compute_mass.unwrap()); + // TODO: For now, until wallets adapt, we only require minimum fee as function of compute mass (but the fee/mass ratio will + // use the max over all masses and will affect tx selection to block template) + let minimum_fee = + self.minimum_required_transaction_relay_fee(transaction.calculated_non_contextual_masses.unwrap().compute_mass); if transaction.calculated_fee.unwrap() < minimum_fee { return Err(NonStandardError::RejectInsufficientFee(transaction_id, transaction.calculated_fee.unwrap(), minimum_fee)); } @@ -241,6 +242,7 @@ mod tests { use 
kaspa_consensus_core::{ config::params::Params, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + mass::NonContextualMasses, network::NetworkType, subnets::SUBNETWORK_ID_NATIVE, tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}, @@ -292,7 +294,7 @@ mod tests { for test in tests.iter() { for net in NetworkType::iter() { let params: Params = net.into(); - let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let mut config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -377,7 +379,7 @@ mod tests { for test in tests { for net in NetworkType::iter() { let params: Params = net.into(); - let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let mut config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); @@ -412,7 +414,7 @@ mod tests { fn new_mtx(tx: Transaction, mass: u64) -> MutableTransaction { let mut mtx = MutableTransaction::from_tx(tx); - mtx.calculated_compute_mass = Some(mass); + mtx.calculated_non_contextual_masses = Some(NonContextualMasses::new(mass, mass)); mtx } @@ -557,7 +559,7 @@ mod tests { for test in tests { for net in NetworkType::iter() { let params: Params = net.into(); - let config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); + let config = Config::build_default(params.target_time_per_block(), false, params.max_block_mass); let counters = Arc::new(MiningCounters::default()); let mempool = Mempool::new(Arc::new(config), counters); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 04407b411e..40527f9a5a 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,4 +1,4 @@ -use kaspa_consensus_core::constants::TX_VERSION; +use kaspa_consensus_core::{config::params::ForkedParam, constants::TX_VERSION}; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: usize = 1_000_000; pub(crate) const DEFAULT_MEMPOOL_SIZE_LIMIT: usize = 1_000_000_000; @@ -30,14 +30,14 @@ pub struct Config { pub maximum_transaction_count: usize, pub mempool_size_limit: usize, pub maximum_build_block_template_attempts: u64, - pub transaction_expire_interval_daa_score: u64, - pub transaction_expire_scan_interval_daa_score: u64, + pub transaction_expire_interval_daa_score: ForkedParam, + pub transaction_expire_scan_interval_daa_score: ForkedParam, pub transaction_expire_scan_interval_milliseconds: u64, - pub accepted_transaction_expire_interval_daa_score: u64, - pub accepted_transaction_expire_scan_interval_daa_score: u64, + pub accepted_transaction_expire_interval_daa_score: ForkedParam, + pub accepted_transaction_expire_scan_interval_daa_score: ForkedParam, pub accepted_transaction_expire_scan_interval_milliseconds: u64, - pub orphan_expire_interval_daa_score: u64, - pub orphan_expire_scan_interval_daa_score: u64, + pub orphan_expire_interval_daa_score: ForkedParam, + pub orphan_expire_scan_interval_daa_score: ForkedParam, pub maximum_orphan_transaction_mass: u64, pub maximum_orphan_transaction_count: u64, pub 
accept_non_standard: bool, @@ -45,7 +45,7 @@ pub struct Config { pub minimum_relay_transaction_fee: u64, pub minimum_standard_transaction_version: u16, pub maximum_standard_transaction_version: u16, - pub network_blocks_per_second: u64, + pub network_blocks_per_second: ForkedParam, } impl Config { @@ -54,14 +54,14 @@ impl Config { maximum_transaction_count: usize, mempool_size_limit: usize, maximum_build_block_template_attempts: u64, - transaction_expire_interval_daa_score: u64, - transaction_expire_scan_interval_daa_score: u64, + transaction_expire_interval_daa_score: ForkedParam, + transaction_expire_scan_interval_daa_score: ForkedParam, transaction_expire_scan_interval_milliseconds: u64, - accepted_transaction_expire_interval_daa_score: u64, - accepted_transaction_expire_scan_interval_daa_score: u64, + accepted_transaction_expire_interval_daa_score: ForkedParam, + accepted_transaction_expire_scan_interval_daa_score: ForkedParam, accepted_transaction_expire_scan_interval_milliseconds: u64, - orphan_expire_interval_daa_score: u64, - orphan_expire_scan_interval_daa_score: u64, + orphan_expire_interval_daa_score: ForkedParam, + orphan_expire_scan_interval_daa_score: ForkedParam, maximum_orphan_transaction_mass: u64, maximum_orphan_transaction_count: u64, accept_non_standard: bool, @@ -69,7 +69,7 @@ impl Config { minimum_relay_transaction_fee: u64, minimum_standard_transaction_version: u16, maximum_standard_transaction_version: u16, - network_blocks_per_second: u64, + network_blocks_per_second: ForkedParam, ) -> Self { Self { maximum_transaction_count, @@ -96,22 +96,28 @@ impl Config { /// Build a default config. /// The arguments should be obtained from the current consensus [`kaspa_consensus_core::config::params::Params`] instance. - pub const fn build_default(target_milliseconds_per_block: u64, relay_non_std_transactions: bool, max_block_mass: u64) -> Self { + pub fn build_default( + target_milliseconds_per_block: ForkedParam, + relay_non_std_transactions: bool, + max_block_mass: u64, + ) -> Self { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, mempool_size_limit: DEFAULT_MEMPOOL_SIZE_LIMIT, maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, - transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, - transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, + transaction_expire_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / v), + transaction_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), transaction_expire_scan_interval_milliseconds: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, - accepted_transaction_expire_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, - accepted_transaction_expire_scan_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 - / target_milliseconds_per_block, + accepted_transaction_expire_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / v), + accepted_transaction_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), 
accepted_transaction_expire_scan_interval_milliseconds: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, - orphan_expire_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, - orphan_expire_scan_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, + orphan_expire_interval_daa_score: target_milliseconds_per_block.map(|v| DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / v), + orphan_expire_scan_interval_daa_score: target_milliseconds_per_block + .map(|v| DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / v), maximum_orphan_transaction_mass: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_MASS, maximum_orphan_transaction_count: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT, accept_non_standard: relay_non_std_transactions, @@ -119,7 +125,7 @@ impl Config { minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, minimum_standard_transaction_version: DEFAULT_MINIMUM_STANDARD_TRANSACTION_VERSION, maximum_standard_transaction_version: DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION, - network_blocks_per_second: 1000 / target_milliseconds_per_block, + network_blocks_per_second: target_milliseconds_per_block.map(|v| 1000 / v), } } diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs index 94ad0d0761..b22eddafb7 100644 --- a/mining/src/mempool/model/accepted_transactions.rs +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -43,7 +43,8 @@ impl AcceptedTransactions { pub(crate) fn expire(&mut self, virtual_daa_score: u64) { let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score + if virtual_daa_score + < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score.get(virtual_daa_score) || now < self.last_expire_scan_time + self.config.accepted_transaction_expire_scan_interval_milliseconds { return; @@ -53,7 +54,7 @@ impl AcceptedTransactions { .transactions .iter() .filter_map(|(transaction_id, daa_score)| { - if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score { + if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score.get(virtual_daa_score) { Some(*transaction_id) } else { None diff --git a/mining/src/mempool/model/frontier/feerate_key.rs b/mining/src/mempool/model/frontier/feerate_key.rs index 843ef0ff13..e1fa0e8a56 100644 --- a/mining/src/mempool/model/frontier/feerate_key.rs +++ b/mining/src/mempool/model/frontier/feerate_key.rs @@ -1,5 +1,5 @@ use crate::{block_template::selector::ALPHA, mempool::model::tx::MempoolTransaction}; -use kaspa_consensus_core::tx::Transaction; +use kaspa_consensus_core::{mass::ContextualMasses, tx::Transaction}; use std::sync::Arc; #[derive(Clone, Debug)] @@ -77,9 +77,13 @@ impl Ord for FeerateTransactionKey { impl From<&MempoolTransaction> for FeerateTransactionKey { fn from(tx: &MempoolTransaction) -> Self { - let mass = tx.mtx.tx.mass(); + // NOTE: The code below is a mempool simplification reducing the various block mass units to a + // single one-dimension value (making it easier to select transactions for block templates). + // Future mempool improvements are expected to refine this behavior and use the multi-dimension values + // in order to optimize and increase block space usage. 
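// [Editor's note, illustrative sketch — not part of this patch] The NOTE above collapses the
// separate block-mass dimensions into one scalar for feerate ordering. The sketch below shows the
// idea with self-contained stand-in types (`NonContextualMassesSketch`, `ContextualMassesSketch`,
// `unified_mass` and `feerate` are hypothetical names, not the kaspa_consensus_core API):
#[derive(Clone, Copy)]
struct NonContextualMassesSketch {
    compute_mass: u64,   // validation/execution related cost
    transient_mass: u64, // serialized-size related cost
}

#[derive(Clone, Copy)]
struct ContextualMassesSketch {
    storage_mass: u64, // UTXO-storage related cost; requires populated input entries (context)
}

// A block is limited by each mass dimension independently, so for a one-dimensional ordering the
// binding constraint is the maximum over all dimensions.
fn unified_mass(nc: NonContextualMassesSketch, c: ContextualMassesSketch) -> u64 {
    nc.compute_mass.max(nc.transient_mass).max(c.storage_mass)
}

fn feerate(fee: u64, nc: NonContextualMassesSketch, c: ContextualMassesSketch) -> f64 {
    // Guard against a zero mass in this illustrative helper
    fee as f64 / unified_mass(nc, c).max(1) as f64
}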
+ let mass = ContextualMasses::new(tx.mtx.tx.mass()) + .max(tx.mtx.calculated_non_contextual_masses.expect("masses are expected to be calculated")); let fee = tx.mtx.calculated_fee.expect("fee is expected to be populated"); - assert_ne!(mass, 0, "mass field is expected to be set when inserting to the mempool"); Self::new(fee, mass, tx.mtx.tx.clone()) } } diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index f813e1a56b..851a0ed1f5 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -99,9 +99,9 @@ impl OrphanPool { } fn check_orphan_mass(&self, transaction: &MutableTransaction) -> RuleResult<()> { - if transaction.calculated_compute_mass.unwrap() > self.config.maximum_orphan_transaction_mass { + if transaction.calculated_non_contextual_masses.unwrap().max() > self.config.maximum_orphan_transaction_mass { return Err(RuleError::RejectBadOrphanMass( - transaction.calculated_compute_mass.unwrap(), + transaction.calculated_non_contextual_masses.unwrap().max(), self.config.maximum_orphan_transaction_mass, )); } @@ -265,7 +265,7 @@ impl OrphanPool { } pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score { + if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score.get(virtual_daa_score) { return Ok(()); } @@ -276,7 +276,7 @@ impl OrphanPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score + && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score.get(virtual_daa_score) { Some(x.id()) } else { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 5741831d3f..c7a4f5a2b5 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -248,7 +248,7 @@ impl TransactionsPool { } // We are iterating ready txs by ascending feerate so the pending tx has lower feerate than all remaining txs - if tx.fee_rate() > feerate_threshold { + if tx.feerate() > feerate_threshold { let err = RuleError::RejectMempoolIsFull; debug!("Transaction {} with feerate {} has been rejected: {}", transaction.id(), feerate_threshold, err); return Err(err); @@ -314,7 +314,8 @@ impl TransactionsPool { pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score + if virtual_daa_score + < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score.get(virtual_daa_score) || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds { return vec![]; @@ -329,7 +330,8 @@ impl TransactionsPool { .values() .filter_map(|x| { if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + && virtual_daa_score + > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score.get(virtual_daa_score) { Some(x.id()) } else { diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 27bb87d09d..280b5ef0d0 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ 
-22,10 +22,8 @@ impl MempoolTransaction { self.mtx.tx.id() } - pub(crate) fn fee_rate(&self) -> f64 { - let contextual_mass = self.mtx.tx.mass(); - assert!(contextual_mass > 0, "expected to be called for validated txs only"); - self.mtx.calculated_fee.unwrap() as f64 / contextual_mass as f64 + pub(crate) fn feerate(&self) -> f64 { + self.mtx.calculated_feerate().unwrap() } } diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 69e08019b6..db95f21108 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -25,7 +25,7 @@ impl Mempool { ) -> RuleResult { self.validate_transaction_unacceptance(&transaction)?; // Populate mass and estimated_size in the beginning, it will be used in multiple places throughout the validation and insertion. - transaction.calculated_compute_mass = Some(consensus.calculate_transaction_compute_mass(&transaction.tx)); + transaction.calculated_non_contextual_masses = Some(consensus.calculate_transaction_non_contextual_masses(&transaction.tx)); self.validate_transaction_in_isolation(&transaction)?; let feerate_threshold = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; self.populate_mempool_entries(&mut transaction); diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 74449424c1..7358853883 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -1,5 +1,6 @@ use super::MiningCounters; use crate::manager::MiningManagerProxy; +use kaspa_consensusmanager::ConsensusManager; use kaspa_core::{ debug, info, task::{ @@ -16,6 +17,8 @@ const MONITOR: &str = "mempool-monitor"; pub struct MiningMonitor { mining_manager: MiningManagerProxy, + consensus_manager: Arc, + // Counters counters: Arc, @@ -28,11 +31,12 @@ pub struct MiningMonitor { impl MiningMonitor { pub fn new( mining_manager: MiningManagerProxy, + consensus_manager: Arc, counters: Arc, tx_script_cache_counters: Arc, tick_service: Arc, ) -> MiningMonitor { - MiningMonitor { mining_manager, counters, tx_script_cache_counters, tick_service } + MiningMonitor { mining_manager, consensus_manager, counters, tx_script_cache_counters, tick_service } } pub async fn worker(self: &Arc) { @@ -66,7 +70,11 @@ impl MiningMonitor { delta.low_priority_tx_counts, delta.tx_accepted_counts, ); - let feerate_estimations = self.mining_manager.clone().get_realtime_feerate_estimations().await; + let feerate_estimations = self + .mining_manager + .clone() + .get_realtime_feerate_estimations(self.consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) + .await; debug!("Realtime feerate estimations: {}", feerate_estimations); } if delta.tx_evicted_counts > 0 { diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index 28d3f58974..c0328504e4 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -13,7 +13,7 @@ use kaspa_consensus_core::{ tx::{TxResult, TxRuleError}, }, header::Header, - mass::transaction_estimated_serialized_size, + mass::{transaction_estimated_serialized_size, ContextualMasses, NonContextualMasses}, merkle::calc_hash_merkle_root, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, utxo::utxo_collection::UtxoCollection, @@ -133,9 +133,7 @@ impl ConsensusApi for ConsensusMock { // At this point we know all UTXO entries are populated, so we can safely calculate the fee let total_in: u64 = 
mutable_tx.entries.iter().map(|x| x.as_ref().unwrap().amount).sum(); let total_out: u64 = mutable_tx.tx.outputs.iter().map(|x| x.value).sum(); - mutable_tx - .tx - .set_mass(self.calculate_transaction_storage_mass(mutable_tx).unwrap() + mutable_tx.calculated_compute_mass.unwrap()); + mutable_tx.tx.set_mass(self.calculate_transaction_contextual_masses(mutable_tx).unwrap().storage_mass); if mutable_tx.calculated_fee.is_none() { let calculated_fee = total_in - total_out; @@ -156,16 +154,13 @@ impl ConsensusApi for ConsensusMock { transactions.iter_mut().map(|x| self.validate_mempool_transaction(x, &Default::default())).collect() } - fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { - if transaction.is_coinbase() { - 0 - } else { - transaction_estimated_serialized_size(transaction) - } + fn calculate_transaction_non_contextual_masses(&self, transaction: &Transaction) -> NonContextualMasses { + let mass = if transaction.is_coinbase() { 0 } else { transaction_estimated_serialized_size(transaction) }; + NonContextualMasses::new(mass, mass) } - fn calculate_transaction_storage_mass(&self, _transaction: &MutableTransaction) -> Option { - Some(0) + fn calculate_transaction_contextual_masses(&self, _transaction: &MutableTransaction) -> Option { + Some(ContextualMasses::new(0)) } fn get_virtual_daa_score(&self) -> u64 { diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 14d4168aca..880e6eefef 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -228,6 +228,9 @@ pub struct FlowContextInner { // Special sampling logger used only for high-bps networks where logs must be throttled block_event_logger: Option, + // Bps upper bound + bps_upper_bound: usize, + // Orphan parameters orphan_resolution_range: u32, max_orphans: usize, @@ -306,11 +309,13 @@ impl FlowContext { ) -> Self { let hub = Hub::new(); - let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (config.bps() as f64).log2().ceil() as u32; + let bps_upper_bound = config.bps().upper_bound() as usize; + let orphan_resolution_range = BASELINE_ORPHAN_RESOLUTION_RANGE + (bps_upper_bound as f64).log2().ceil() as u32; // The maximum amount of orphans allowed in the orphans pool. This number is an approximation // of how many orphans there can possibly be on average bounded by an upper bound. 
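// [Editor's note, illustrative sketch — not part of this patch] A worked example of the sizing
// performed below, using purely hypothetical numbers (the real inputs are
// BASELINE_ORPHAN_RESOLUTION_RANGE, the config's bps and ghostdag_k upper bounds, and
// MAX_ORPHANS_UPPER_BOUND):
fn max_orphans_sketch(baseline_range: u32, bps_upper_bound: u64, ghostdag_k_upper_bound: u64, cap: usize) -> usize {
    let orphan_resolution_range = baseline_range + (bps_upper_bound as f64).log2().ceil() as u32;
    (2u64.pow(orphan_resolution_range) as usize * ghostdag_k_upper_bound as usize).min(cap)
}
// For instance, with baseline_range = 5, a bps upper bound of 10 and a ghostdag_k upper bound of 40:
// orphan_resolution_range = 5 + ceil(log2(10)) = 9, so the pool is sized at min(2^9 * 40, cap) = min(20480, cap).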
- let max_orphans = (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k as usize).min(MAX_ORPHANS_UPPER_BOUND); + let max_orphans = + (2u64.pow(orphan_resolution_range) as usize * config.ghostdag_k().upper_bound() as usize).min(MAX_ORPHANS_UPPER_BOUND); Self { inner: Arc::new(FlowContextInner { node_id: Uuid::new_v4().into(), @@ -327,7 +332,8 @@ impl FlowContext { mining_manager, tick_service, notification_root, - block_event_logger: if config.bps() > 1 { Some(BlockEventLogger::new(config.bps() as usize)) } else { None }, + block_event_logger: if bps_upper_bound > 1 { Some(BlockEventLogger::new(bps_upper_bound)) } else { None }, + bps_upper_bound, orphan_resolution_range, max_orphans, config, @@ -336,7 +342,7 @@ impl FlowContext { } pub fn block_invs_channel_size(&self) -> usize { - self.config.bps() as usize * Router::incoming_flow_baseline_channel_size() + self.bps_upper_bound * Router::incoming_flow_baseline_channel_size() } pub fn orphan_resolution_range(&self) -> u32 { diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index 0dd7fe64f1..ad33304ceb 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -181,22 +181,35 @@ impl IbdFlow { // means it's in its antichain (because if `highest_known_syncer_chain_hash` was in // the pruning point's past the pruning point itself would be // `highest_known_syncer_chain_hash`). So it means there's a finality conflict. - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) + // + // TODO (relaxed): consider performing additional actions on finality conflicts in addition + // to disconnecting from the peer (e.g., banning, rpc notification) return Ok(IbdType::None); } let hst_header = consensus.async_get_header(consensus.async_get_headers_selected_tip().await).await.unwrap(); - if relay_header.blue_score >= hst_header.blue_score + self.ctx.config.pruning_depth - && relay_header.blue_work > hst_header.blue_work - { - if unix_now() > consensus.async_creation_timestamp().await + self.ctx.config.finality_duration() { + // [Crescendo]: use the post-Crescendo pruning depth depending on hst's DAA score. + // Using a shorter depth for this condition during the fork transition period (if hst is shortly before activation) + // has a negligible effect, since there are other conditions required for activating a headers-proof IBD. The important + // thing is that we eventually adjust to the longer period. + let pruning_depth = self.ctx.config.pruning_depth().get(hst_header.daa_score); + if relay_header.blue_score >= hst_header.blue_score + pruning_depth && relay_header.blue_work > hst_header.blue_work { + // [Crescendo]: switch to the new *shorter* finality duration only after sufficient time has passed + // since activation (measured via the new *larger* finality depth). + // Note: these are not critical execution paths so such estimation heuristics are completely ok in this context.
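// [Editor's note, illustrative sketch — not part of this patch] The lookup below relies on a
// fork-aware parameter abstraction. The minimal stand-in here (`ForkedParamSketch` and
// `effective_finality_duration` are hypothetical names, not the actual ForkedParam API) shows the
// `.get(score)` selection and why evaluating it at `hst daa_score - finality_depth upper bound`
// keeps the pre-activation value in use until the selected tip is roughly a full finality window
// past the activation point:
struct ForkedParamSketch<T> {
    before: T,                 // value in effect before the fork activates
    after: T,                  // value in effect from the activation DAA score onwards
    activation_daa_score: u64,
}

impl<T: Copy> ForkedParamSketch<T> {
    fn get(&self, daa_score: u64) -> T {
        if daa_score >= self.activation_daa_score {
            self.after
        } else {
            self.before
        }
    }
}

fn effective_finality_duration(
    duration_millis: &ForkedParamSketch<u64>,
    hst_daa_score: u64,
    finality_depth_upper_bound: u64,
) -> u64 {
    duration_millis.get(hst_daa_score.saturating_sub(finality_depth_upper_bound))
}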
+ let finality_duration_in_milliseconds = self + .ctx + .config + .finality_duration_in_milliseconds() + .get(hst_header.daa_score.saturating_sub(self.ctx.config.finality_depth().upper_bound())); + if unix_now() > consensus.async_creation_timestamp().await + finality_duration_in_milliseconds { let fp = consensus.async_finality_point().await; let fp_ts = consensus.async_get_header(fp).await?.timestamp; - if unix_now() < fp_ts + self.ctx.config.finality_duration() * 3 / 2 { + if unix_now() < fp_ts + finality_duration_in_milliseconds * 3 / 2 { // We reject the headers proof if the node has a relatively up-to-date finality point and current // consensus has matured for long enough (and not recently synced). This is mostly a spam-protector // since subsequent checks identify these violations as well - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) + // TODO (relaxed): consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) return Ok(IbdType::None); } } @@ -272,7 +285,7 @@ impl IbdFlow { // Check if past pruning points violate finality of current consensus if self.ctx.consensus().session().await.async_are_pruning_points_violating_finality(pruning_points.clone()).await { - // TODO: consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) + // TODO (relaxed): consider performing additional actions on finality conflicts in addition to disconnecting from the peer (e.g., banning, rpc notification) return Err(ProtocolError::Other("pruning points are violating finality")); } @@ -340,7 +353,7 @@ impl IbdFlow { .await?; } - // TODO: add logs to staging commit process + // TODO (relaxed): add logs to staging commit process info!("Starting to process {} trusted blocks", trusted_set.len()); let mut last_time = Instant::now(); @@ -353,7 +366,7 @@ impl IbdFlow { last_time = now; last_index = i; } - // TODO: queue and join in batches + // TODO (relaxed): queue and join in batches staging.validate_and_insert_trusted_block(tb).virtual_state_task.await?; } info!("Done processing trusted blocks"); @@ -503,7 +516,7 @@ staging selected tip ({}) is too small or negative. 
Aborting IBD...", } async fn sync_missing_block_bodies(&mut self, consensus: &ConsensusProxy, high: Hash) -> Result<(), ProtocolError> { - // TODO: query consensus in batches + // TODO (relaxed): query consensus in batches let sleep_task = sleep(Duration::from_secs(2)); let hashes_task = consensus.async_get_missing_block_body_hashes(high); tokio::pin!(sleep_task); diff --git a/protocol/flows/src/v5/ibd/negotiate.rs b/protocol/flows/src/v5/ibd/negotiate.rs index b001d3a061..20963c14e7 100644 --- a/protocol/flows/src/v5/ibd/negotiate.rs +++ b/protocol/flows/src/v5/ibd/negotiate.rs @@ -131,7 +131,7 @@ impl IbdFlow { self.router, negotiation_restart_counter ))); } - if negotiation_restart_counter > self.ctx.config.bps() { + if negotiation_restart_counter > self.ctx.config.bps().upper_bound() { // bps is just an intuitive threshold here warn!("IBD chain negotiation with syncer {} restarted {} times", self.router, negotiation_restart_counter); } else { diff --git a/protocol/flows/src/v5/request_antipast.rs b/protocol/flows/src/v5/request_antipast.rs index f5881efc1a..521418a288 100644 --- a/protocol/flows/src/v5/request_antipast.rs +++ b/protocol/flows/src/v5/request_antipast.rs @@ -46,7 +46,9 @@ impl HandleAntipastRequests { // intersected by past of the relayed block. We do not expect the relay block to be too much after // the sink (in fact usually it should be in its past or anticone), hence we bound the expected traversal to be // in the order of `mergeset_size_limit`. - let hashes = session.async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit * 2)).await?; + let hashes = session + .async_get_antipast_from_pov(block, context, Some(self.ctx.config.mergeset_size_limit().upper_bound() * 4)) + .await?; let mut headers = session .spawn_blocking(|c| hashes.into_iter().map(|h| c.get_header(h)).collect::, ConsensusError>>()) .await?; diff --git a/protocol/flows/src/v5/request_headers.rs b/protocol/flows/src/v5/request_headers.rs index d8f12b6f06..38f2fcac81 100644 --- a/protocol/flows/src/v5/request_headers.rs +++ b/protocol/flows/src/v5/request_headers.rs @@ -37,7 +37,7 @@ impl RequestHeadersFlow { async fn start_impl(&mut self) -> Result<(), ProtocolError> { const MAX_BLOCKS: usize = 1 << 10; // Internal consensus logic requires that `max_blocks > mergeset_size_limit` - let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit as usize + 1); + let max_blocks = max(MAX_BLOCKS, self.ctx.config.mergeset_size_limit().upper_bound() as usize + 1); loop { let (msg, request_id) = dequeue_with_request_id!(self.incoming_route, Payload::RequestHeaders)?; diff --git a/protocol/flows/src/v6/mod.rs b/protocol/flows/src/v6/mod.rs index 8736f4e0b9..b73fae2d3f 100644 --- a/protocol/flows/src/v6/mod.rs +++ b/protocol/flows/src/v6/mod.rs @@ -128,7 +128,7 @@ pub fn register(ctx: FlowContext, router: Arc) -> Vec> { let invs_route = router.subscribe_with_capacity(vec![KaspadMessagePayloadType::InvRelayBlock], ctx.block_invs_channel_size()); let shared_invs_route = SharedIncomingRoute::new(invs_route); - let num_relay_flows = (ctx.config.bps() as usize / 2).max(1); + let num_relay_flows = (ctx.config.bps().upper_bound() as usize / 2).max(1); flows.extend((0..num_relay_flows).map(|_| { Box::new(HandleRelayInvsFlow::new( ctx.clone(), diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index 495e681e9a..bdf471c236 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -4,7 +4,7 @@ use clap::{Arg, ArgAction, Command}; use itertools::Itertools; use 
kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ - config::params::{TESTNET11_PARAMS, TESTNET_PARAMS}, + config::params::TESTNET_PARAMS, constants::{SOMPI_PER_KASPA, TX_VERSION}, sign::sign, subnets::SUBNETWORK_ID_NATIVE, @@ -213,7 +213,7 @@ async fn main() { let info = rpc_client.get_block_dag_info().await.expect("Failed to get block dag info."); let coinbase_maturity = match info.network.suffix { - Some(11) => TESTNET11_PARAMS.coinbase_maturity, + Some(11) => panic!("TN11 is not supported on this version"), None | Some(_) => TESTNET_PARAMS.coinbase_maturity, }; info!( diff --git a/rpc/grpc/server/src/service.rs b/rpc/grpc/server/src/service.rs index 7d810bf974..0dd4bb8972 100644 --- a/rpc/grpc/server/src/service.rs +++ b/rpc/grpc/server/src/service.rs @@ -64,7 +64,7 @@ impl AsyncService for GrpcService { let manager = Manager::new(self.rpc_max_clients); let grpc_adaptor = Adaptor::server( self.net_address, - self.config.bps(), + self.config.bps().upper_bound(), manager, self.core_service.clone(), self.core_service.notifier(), diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs index c744300e52..5080280476 100644 --- a/rpc/service/src/converter/consensus.rs +++ b/rpc/service/src/converter/consensus.rs @@ -125,7 +125,7 @@ impl ConsensusConverter { let verbose_data = Some(RpcTransactionVerboseData { transaction_id: transaction.id(), hash: hash(transaction, false), - compute_mass: consensus.calculate_transaction_compute_mass(transaction), + compute_mass: consensus.calculate_transaction_non_contextual_masses(transaction).compute_mass, // TODO: make block_hash an option block_hash: header.map_or_else(RpcHash::default, |x| x.hash), block_time: header.map_or(0, |x| x.timestamp), diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index c8c40c7707..4a5b6a9cca 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -318,8 +318,14 @@ impl RpcApi for RpcCoreService { // A simple heuristic check which signals that the mined block is out of date // and should not be accepted unless user explicitly requests - let daa_window_block_duration = self.config.daa_window_duration_in_blocks(virtual_daa_score); - if virtual_daa_score > daa_window_block_duration && block.header.daa_score < virtual_daa_score - daa_window_block_duration + // + // [Crescendo]: switch to the larger duration only after a full window with the new duration is reached post activation + let difficulty_window_duration = self + .config + .difficulty_window_duration_in_block_units() + .get(virtual_daa_score.saturating_sub(self.config.difficulty_window_duration_in_block_units().after())); + if virtual_daa_score > difficulty_window_duration + && block.header.daa_score < virtual_daa_score - difficulty_window_duration { // error = format!("Block rejected. 
Reason: block DAA score {0} is too far behind virtual's DAA score {1}", block.header.daa_score, virtual_daa_score) return Ok(SubmitBlockResponse { report: SubmitBlockReport::Reject(SubmitBlockRejectReason::BlockInvalid) }); @@ -439,7 +445,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // We use +1 because low_hash is also returned // max_blocks MUST be >= mergeset_size_limit + 1 - let max_blocks = self.config.mergeset_size_limit as usize + 1; + let max_blocks = self.config.mergeset_size_limit().upper_bound() as usize + 1; let (block_hashes, high_hash) = session.async_get_hashes_between(low_hash, sink_hash, max_blocks).await?; // If the high hash is equal to sink it means get_hashes_between didn't skip any hashes, and @@ -616,7 +622,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // this bounds by number of merged blocks, if include_accepted_transactions = true // else it returns the batch_size amount on pure chain blocks. // Note: batch_size does not bound removed chain blocks, only added chain blocks. - let batch_size = (self.config.mergeset_size_limit * 10) as usize; + let batch_size = (self.config.mergeset_size_limit().upper_bound() * 10) as usize; let mut virtual_chain_batch = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?; let accepted_transaction_ids = if request.include_accepted_transaction_ids { let accepted_transaction_ids = self @@ -718,6 +724,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and let mut header_idx = 0; let mut req_idx = 0; + // TODO (crescendo) + // Loop runs at O(n + m) where n = # pp headers, m = # requested daa_scores // Loop will always end because in the worst case the last header with daa_score = 0 (the genesis) // will cause every remaining requested daa_score to be "found in range" @@ -732,7 +740,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and // For daa_score later than the last header, we estimate in milliseconds based on the difference let time_adjustment = if header_idx == 0 { // estimate milliseconds = (daa_score * target_time_per_block) - (curr_daa_score - header.daa_score).checked_mul(self.config.target_time_per_block).unwrap_or(u64::MAX) + (curr_daa_score - header.daa_score).checked_mul(self.config.prior_target_time_per_block).unwrap_or(u64::MAX) } else { // "next" header is the one that we processed last iteration let next_header = &headers[header_idx - 1]; @@ -766,8 +774,16 @@ NOTE: This error usually indicates an RPC conversion error between the node and _request: GetFeeEstimateRequest, ) -> RpcResult { let mining_manager = self.mining_manager.clone(); - let estimate = - self.fee_estimate_cache.get(async move { mining_manager.get_realtime_feerate_estimations().await.into_rpc() }).await; + let consensus_manager = self.consensus_manager.clone(); + let estimate = self + .fee_estimate_cache + .get(async move { + mining_manager + .get_realtime_feerate_estimations(consensus_manager.consensus().unguarded_session().get_virtual_daa_score()) + .await + .into_rpc() + }) + .await; Ok(GetFeeEstimateResponse { estimate }) } @@ -864,8 +880,8 @@ NOTE: This error usually indicates an RPC conversion error between the node and if !self.config.unsafe_rpc && request.window_size > MAX_SAFE_WINDOW_SIZE { return Err(RpcError::WindowSizeExceedingMaximum(request.window_size, MAX_SAFE_WINDOW_SIZE)); } - if request.window_size as u64 > self.config.pruning_depth { - return 
Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, self.config.pruning_depth)); + if request.window_size as u64 > self.config.pruning_depth().lower_bound() { + return Err(RpcError::WindowSizeExceedingPruningDepth(request.window_size, self.config.prior_pruning_depth)); } // In the previous golang implementation the convention for virtual was the following const. diff --git a/simpa/src/main.rs b/simpa/src/main.rs index c35c0c640e..728971725a 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -13,7 +13,7 @@ use kaspa_consensus::{ headers::HeaderStoreReader, relations::RelationsStoreReader, }, - params::{ForkActivation, Params, Testnet11Bps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, TESTNET11_PARAMS}, + params::{ForkActivation, Params, TenBps, DEVNET_PARAMS, NETWORK_DELAY_BOUND, SIMNET_PARAMS}, }; use kaspa_consensus_core::{ api::ConsensusApi, block::Block, blockstatus::BlockStatus, config::bps::calculate_ghostdag_k, errors::block::BlockProcessResult, @@ -189,11 +189,10 @@ fn main_impl(mut args: Args) { args.miners ); } - args.bps = if args.testnet11 { Testnet11Bps::bps() as f64 } else { args.bps }; - let mut params = if args.testnet11 { TESTNET11_PARAMS } else { DEVNET_PARAMS }; - params.storage_mass_activation = ForkActivation::new(400); + args.bps = if args.testnet11 { TenBps::bps() as f64 } else { args.bps }; + let mut params = if args.testnet11 { SIMNET_PARAMS } else { DEVNET_PARAMS }; + params.crescendo_activation = ForkActivation::always(); params.storage_mass_parameter = 10_000; - params.payload_activation = ForkActivation::always(); let mut builder = ConfigBuilder::new(params) .apply_args(|config| apply_args_to_consensus_params(&args, &mut config.params)) .apply_args(|config| apply_args_to_perf_params(&args, &mut config.perf)) @@ -293,47 +292,46 @@ fn apply_args_to_consensus_params(args: &Args, params: &mut Params) { if args.testnet11 { info!( "Using kaspa-testnet-11 configuration (GHOSTDAG K={}, DAA window size={}, Median time window size={})", - params.ghostdag_k, - params.difficulty_window_size(0), - params.past_median_time_window_size(0), + params.ghostdag_k().before(), + params.difficulty_window_size().before(), + params.past_median_time_window_size().before(), ); } else { let max_delay = args.delay.max(NETWORK_DELAY_BOUND as f64); - let k = u64::max(calculate_ghostdag_k(2.0 * max_delay * args.bps, 0.05), params.ghostdag_k as u64); + let k = u64::max(calculate_ghostdag_k(2.0 * max_delay * args.bps, 0.05), params.ghostdag_k().before() as u64); let k = u64::min(k, KType::MAX as u64) as KType; // Clamp to KType::MAX - params.ghostdag_k = k; - params.mergeset_size_limit = k as u64 * 10; - params.max_block_parents = u8::max((0.66 * k as f64) as u8, 10); - params.target_time_per_block = (1000.0 / args.bps) as u64; - params.merge_depth = (params.merge_depth as f64 * args.bps) as u64; + params.prior_ghostdag_k = k; + params.prior_mergeset_size_limit = k as u64 * 10; + params.prior_max_block_parents = u8::max((0.66 * k as f64) as u8, 10); + params.prior_target_time_per_block = (1000.0 / args.bps) as u64; + params.prior_merge_depth = (params.prior_merge_depth as f64 * args.bps) as u64; params.coinbase_maturity = (params.coinbase_maturity as f64 * f64::max(1.0, args.bps * args.delay * 0.25)) as u64; if args.daa_legacy { // Scale DAA and median-time windows linearly with BPS - params.sampling_activation = ForkActivation::never(); - params.legacy_timestamp_deviation_tolerance = (params.legacy_timestamp_deviation_tolerance as f64 * args.bps) as u64; - 
params.legacy_difficulty_window_size = (params.legacy_difficulty_window_size as f64 * args.bps) as usize; + params.crescendo_activation = ForkActivation::never(); + params.timestamp_deviation_tolerance = (params.timestamp_deviation_tolerance as f64 * args.bps) as u64; + params.prior_difficulty_window_size = (params.prior_difficulty_window_size as f64 * args.bps) as usize; } else { // Use the new sampling algorithms - params.sampling_activation = ForkActivation::always(); - params.past_median_time_sample_rate = (10.0 * args.bps) as u64; - params.new_timestamp_deviation_tolerance = (600.0 * args.bps) as u64; - params.difficulty_sample_rate = (2.0 * args.bps) as u64; + params.crescendo_activation = ForkActivation::always(); + params.timestamp_deviation_tolerance = (600.0 * args.bps) as u64; + params.crescendo.past_median_time_sample_rate = (10.0 * args.bps) as u64; + params.crescendo.difficulty_sample_rate = (2.0 * args.bps) as u64; } - info!("2Dλ={}, GHOSTDAG K={}, DAA window size={}", 2.0 * args.delay * args.bps, k, params.difficulty_window_size(0)); + info!("2Dλ={}, GHOSTDAG K={}, DAA window size={}", 2.0 * args.delay * args.bps, k, params.difficulty_window_size().before()); } if args.test_pruning { params.pruning_proof_m = 16; - params.legacy_difficulty_window_size = 64; - params.legacy_timestamp_deviation_tolerance = 16; - params.new_timestamp_deviation_tolerance = 16; - params.sampled_difficulty_window_size = params.sampled_difficulty_window_size.min(32); - params.finality_depth = 128; - params.merge_depth = 128; - params.mergeset_size_limit = 32; - params.pruning_depth = params.anticone_finalization_depth(); - info!("Setting pruning depth to {}", params.pruning_depth); + params.prior_difficulty_window_size = 64; + params.timestamp_deviation_tolerance = 16; + params.crescendo.sampled_difficulty_window_size = params.crescendo.sampled_difficulty_window_size.min(32); + params.prior_finality_depth = 128; + params.prior_merge_depth = 128; + params.prior_mergeset_size_limit = 32; + params.prior_pruning_depth = params.anticone_finalization_depth().before(); + info!("Setting pruning depth to {}", params.prior_pruning_depth); } } @@ -349,7 +347,7 @@ fn apply_args_to_perf_params(args: &Args, perf_params: &mut PerfParams) { async fn validate(src_consensus: &Consensus, dst_consensus: &Consensus, params: &Params, delay: f64, bps: f64, header_only: bool) { let hashes = topologically_ordered_hashes(src_consensus, params.genesis.hash); let num_blocks = hashes.len(); - let num_txs = print_stats(src_consensus, &hashes, delay, bps, params.ghostdag_k); + let num_txs = print_stats(src_consensus, &hashes, delay, bps, params.ghostdag_k().before()); if header_only { info!("Validating {num_blocks} headers..."); } else { diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 958b4799e3..2217ef2dc3 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -157,7 +157,7 @@ impl Miner { .into_par_iter() .map(|mutable_tx| { let signed_tx = sign(mutable_tx, schnorr_key); - let mass = self.mass_calculator.calc_tx_overall_mass(&signed_tx.as_verifiable(), None).unwrap(); + let mass = self.mass_calculator.calc_contextual_masses(&signed_tx.as_verifiable()).unwrap().storage_mass; signed_tx.tx.set_mass(mass); let mut signed_tx = signed_tx.tx; signed_tx.finalize(); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 719158c8b0..bbe82ec333 100644 --- 
a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -17,18 +17,19 @@ use kaspa_consensus::model::stores::reachability::DbReachabilityStore; use kaspa_consensus::model::stores::relations::DbRelationsStore; use kaspa_consensus::model::stores::selected_chain::SelectedChainStoreReader; use kaspa_consensus::params::{ - ForkActivation, Params, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, + ForkActivation, Params, CRESCENDO, DEVNET_PARAMS, MAINNET_PARAMS, MAX_DIFFICULTY_TARGET, MAX_DIFFICULTY_TARGET_AS_F64, }; use kaspa_consensus::pipeline::monitor::ConsensusMonitor; use kaspa_consensus::pipeline::ProcessingCounters; use kaspa_consensus::processes::reachability::tests::{DagBlock, DagBuilder, StoreValidationExtensions}; use kaspa_consensus::processes::window::{WindowManager, WindowType}; +use kaspa_consensus_core::api::args::TransactionValidationArgs; use kaspa_consensus_core::api::{BlockValidationFutures, ConsensusApi}; use kaspa_consensus_core::block::Block; use kaspa_consensus_core::blockhash::new_unique; use kaspa_consensus_core::blockstatus::BlockStatus; use kaspa_consensus_core::coinbase::MinerData; -use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}; +use kaspa_consensus_core::constants::{BLOCK_VERSION, SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER, TRANSIENT_BYTE_TO_MASS_FACTOR}; use kaspa_consensus_core::errors::block::{BlockProcessResult, RuleError}; use kaspa_consensus_core::header::Header; use kaspa_consensus_core::network::{NetworkId, NetworkType::Mainnet}; @@ -45,6 +46,7 @@ use kaspa_core::task::tick::TickService; use kaspa_core::time::unix_now; use kaspa_database::utils::get_kaspa_tempdir; use kaspa_hashes::Hash; +use kaspa_utils::arc::ArcExtensions; use crate::common; use flate2::read::GzDecoder; @@ -262,8 +264,8 @@ async fn ghostdag_test() { .skip_proof_of_work() .edit_consensus_params(|p| { p.genesis.hash = string_to_hash(&test.genesis_id); - p.ghostdag_k = test.k; - p.min_difficulty_window_len = p.legacy_difficulty_window_size; + p.prior_ghostdag_k = test.k; + p.min_difficulty_window_size = p.prior_difficulty_window_size; }) .build(); let consensus = TestConsensus::new(&config); @@ -337,7 +339,7 @@ async fn block_window_test() { .skip_proof_of_work() .edit_consensus_params(|p| { p.genesis.hash = string_to_hash("A"); - p.ghostdag_k = 1; + p.prior_ghostdag_k = 1; }) .build(); let consensus = TestConsensus::new(&config); @@ -427,7 +429,7 @@ async fn header_in_isolation_validation_test() { block.header.hash = 2.into(); let now = unix_now(); - let block_ts = now + config.legacy_timestamp_deviation_tolerance * config.target_time_per_block + 2000; + let block_ts = now + config.timestamp_deviation_tolerance * config.prior_target_time_per_block + 2000; block.header.timestamp = block_ts; match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::TimeTooFarIntoTheFuture(ts, _)) => { @@ -454,11 +456,11 @@ async fn header_in_isolation_validation_test() { { let mut block = block.clone(); block.header.hash = 4.into(); - block.header.parents_by_level[0] = (5..(config.max_block_parents + 6)).map(|x| (x as u64).into()).collect(); + block.header.parents_by_level[0] = (5..(config.prior_max_block_parents + 6)).map(|x| (x as u64).into()).collect(); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::TooManyParents(num_parents, limit)) => { - 
assert_eq!((config.max_block_parents + 1) as usize, num_parents); - assert_eq!(limit, config.max_block_parents as usize); + assert_eq!((config.prior_max_block_parents + 1) as usize, num_parents); + assert_eq!(limit, config.prior_max_block_parents as usize); } res => { panic!("Unexpected result: {res:?}") @@ -563,7 +565,7 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation = ForkActivation::never(); + p.crescendo_activation = ForkActivation::never(); }) .build(), }, @@ -572,10 +574,10 @@ async fn median_time_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.sampling_activation = ForkActivation::always(); - p.new_timestamp_deviation_tolerance = 120; - p.past_median_time_sample_rate = 3; - p.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; + p.crescendo_activation = ForkActivation::always(); + p.timestamp_deviation_tolerance = 120; + p.crescendo.past_median_time_sample_rate = 3; + p.crescendo.past_median_time_sampled_window_size = (2 * 120 - 1) / 3; }) .build(), }, @@ -585,8 +587,9 @@ async fn median_time_test() { let consensus = TestConsensus::new(&test.config); let wait_handles = consensus.init(); - let num_blocks = test.config.past_median_time_window_size(0) as u64 * test.config.past_median_time_sample_rate(0); - let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance(0); + let num_blocks = + test.config.past_median_time_window_size().before() as u64 * test.config.past_median_time_sample_rate().before(); + let timestamp_deviation_tolerance = test.config.timestamp_deviation_tolerance; for i in 1..(num_blocks + 1) { let parent = if i == 1 { test.config.genesis.hash } else { (i - 1).into() }; let mut block = consensus.build_block_with_parents(i.into(), vec![parent]); @@ -630,7 +633,7 @@ async fn mergeset_size_limit_test() { let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); - let num_blocks_per_chain = config.mergeset_size_limit + 1; + let num_blocks_per_chain = config.prior_mergeset_size_limit + 1; let mut tip1_hash = config.genesis.hash; for i in 1..(num_blocks_per_chain + 1) { @@ -649,8 +652,8 @@ async fn mergeset_size_limit_test() { let block = consensus.build_block_with_parents((3 * num_blocks_per_chain + 1).into(), vec![tip1_hash, tip2_hash]); match consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await { Err(RuleError::MergeSetTooBig(a, b)) => { - assert_eq!(a, config.mergeset_size_limit + 1); - assert_eq!(b, config.mergeset_size_limit); + assert_eq!(a, config.prior_mergeset_size_limit + 1); + assert_eq!(b, config.prior_mergeset_size_limit); } res => { panic!("Unexpected result: {res:?}") @@ -811,24 +814,18 @@ impl KaspadGoParams { dns_seeders: &[], net: NetworkId { network_type: Mainnet, suffix: None }, genesis: GENESIS, - ghostdag_k: self.K, - legacy_timestamp_deviation_tolerance: self.TimestampDeviationTolerance, - new_timestamp_deviation_tolerance: self.TimestampDeviationTolerance, - past_median_time_sample_rate: 1, - past_median_time_sampled_window_size: 2 * self.TimestampDeviationTolerance - 1, - target_time_per_block: self.TargetTimePerBlock / 1_000_000, - sampling_activation: ForkActivation::never(), - max_block_parents: self.MaxBlockParents, + prior_ghostdag_k: self.K, + timestamp_deviation_tolerance: self.TimestampDeviationTolerance, + prior_target_time_per_block: self.TargetTimePerBlock / 1_000_000, + prior_max_block_parents: 
self.MaxBlockParents, max_difficulty_target: MAX_DIFFICULTY_TARGET, max_difficulty_target_f64: MAX_DIFFICULTY_TARGET_AS_F64, - difficulty_sample_rate: 1, - sampled_difficulty_window_size: self.DifficultyAdjustmentWindowSize, - legacy_difficulty_window_size: self.DifficultyAdjustmentWindowSize, - min_difficulty_window_len: self.DifficultyAdjustmentWindowSize, - mergeset_size_limit: self.MergeSetSizeLimit, - merge_depth: self.MergeDepth, - finality_depth, - pruning_depth: 2 * finality_depth + 4 * self.MergeSetSizeLimit * self.K as u64 + 2 * self.K as u64 + 2, + prior_difficulty_window_size: self.DifficultyAdjustmentWindowSize, + min_difficulty_window_size: self.DifficultyAdjustmentWindowSize, + prior_mergeset_size_limit: self.MergeSetSizeLimit, + prior_merge_depth: self.MergeDepth, + prior_finality_depth: finality_depth, + prior_pruning_depth: 2 * finality_depth + 4 * self.MergeSetSizeLimit * self.K as u64 + 2 * self.K as u64 + 2, coinbase_payload_script_public_key_max_len: self.CoinbasePayloadScriptPublicKeyMaxLength, max_coinbase_payload_len: self.MaxCoinbasePayloadLength, max_tx_inputs: MAINNET_PARAMS.max_tx_inputs, @@ -840,16 +837,14 @@ impl KaspadGoParams { mass_per_sig_op: self.MassPerSigOp, max_block_mass: self.MaxBlockMass, storage_mass_parameter: STORAGE_MASS_PARAMETER, - storage_mass_activation: ForkActivation::never(), - kip10_activation: ForkActivation::never(), deflationary_phase_daa_score: self.DeflationaryPhaseDaaScore, pre_deflationary_phase_base_subsidy: self.PreDeflationaryPhaseBaseSubsidy, coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, skip_proof_of_work: self.SkipProofOfWork, max_block_level: self.MaxBlockLevel, pruning_proof_m: self.PruningProofM, - payload_activation: ForkActivation::never(), - runtime_sig_op_counting: ForkActivation::never(), + crescendo: CRESCENDO, + crescendo_activation: ForkActivation::never(), } } } @@ -935,13 +930,13 @@ async fn json_test(file_path: &str, concurrency: bool) { let genesis_block = json_line_to_block(second_line); params.genesis = (genesis_block.header.as_ref(), DEVNET_PARAMS.genesis.coinbase_payload).into(); } - params.min_difficulty_window_len = params.legacy_difficulty_window_size; + params.min_difficulty_window_size = params.prior_difficulty_window_size; params } else { let genesis_block = json_line_to_block(first_line); let mut params = DEVNET_PARAMS; params.genesis = (genesis_block.header.as_ref(), params.genesis.coinbase_payload).into(); - params.min_difficulty_window_len = params.legacy_difficulty_window_size; + params.min_difficulty_window_size = params.prior_difficulty_window_size; params }; @@ -1267,18 +1262,21 @@ async fn bounded_merge_depth_test() { let config = ConfigBuilder::new(DEVNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 5; - p.merge_depth = 7; + p.prior_ghostdag_k = 5; + p.prior_merge_depth = 7; }) .build(); - assert!((config.ghostdag_k as u64) < config.merge_depth, "K must be smaller than merge depth for this test to run"); + assert!( + (config.ghostdag_k().before() as u64) < config.prior_merge_depth, + "K must be smaller than merge depth for this test to run" + ); let consensus = TestConsensus::new(&config); let wait_handles = consensus.init(); let mut selected_chain = vec![config.genesis.hash]; - for i in 1..(config.merge_depth + 3) { + for i in 1..(config.prior_merge_depth + 3) { let hash: Hash = (i + 1).into(); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); @@ -1286,8 +1284,8 @@ async fn 
bounded_merge_depth_test() { // The length of block_chain_2 is shorter by one than selected_chain, so selected_chain will remain the selected chain. let mut block_chain_2 = vec![config.genesis.hash]; - for i in 1..(config.merge_depth + 2) { - let hash: Hash = (i + config.merge_depth + 3).into(); + for i in 1..(config.prior_merge_depth + 2) { + let hash: Hash = (i + config.prior_merge_depth + 3).into(); consensus.add_block_with_parents(hash, vec![*block_chain_2.last().unwrap()]).await.unwrap(); block_chain_2.push(hash); } @@ -1323,7 +1321,7 @@ async fn bounded_merge_depth_test() { .unwrap(); // We extend the selected chain until kosherizing_hash will be red from the virtual POV. - for i in 0..config.ghostdag_k { + for i in 0..config.ghostdag_k().before() { let hash = Hash::from_u64_word((i + 1) as u64 * 1000); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); @@ -1347,7 +1345,7 @@ async fn difficulty_test() { async fn add_block(consensus: &TestConsensus, block_time: Option, parents: Vec) -> Header { let selected_parent = consensus.ghostdag_manager().find_selected_parent(parents.iter().copied()); let block_time = block_time.unwrap_or_else(|| { - consensus.headers_store().get_timestamp(selected_parent).unwrap() + consensus.params().target_time_per_block(0) + consensus.headers_store().get_timestamp(selected_parent).unwrap() + consensus.params().prior_target_time_per_block }); let mut header = consensus.build_header_with_parents(new_unique(), parents); header.timestamp = block_time; @@ -1370,7 +1368,8 @@ async fn difficulty_test() { } fn full_window_bits(consensus: &TestConsensus, hash: Hash) -> u32 { - let window_size = consensus.params().difficulty_window_size(0) * consensus.params().difficulty_sample_rate(0) as usize; + let window_size = + consensus.params().difficulty_window_size().before() * consensus.params().difficulty_sample_rate().before() as usize; let ghostdag_data = &consensus.ghostdag_store().get_data(hash).unwrap(); let window = consensus.window_manager().block_window(ghostdag_data, WindowType::VaryingWindow(window_size)).unwrap(); assert_eq!(window.blocks.len(), window_size); @@ -1385,12 +1384,12 @@ async fn difficulty_test() { } const FULL_WINDOW_SIZE: usize = 90; - const SAMPLED_WINDOW_SIZE: usize = 11; + const SAMPLED_WINDOW_SIZE: u64 = 11; const SAMPLE_RATE: u64 = 6; const PMT_DEVIATION_TOLERANCE: u64 = 20; const PMT_SAMPLE_RATE: u64 = 3; const PMT_SAMPLED_WINDOW_SIZE: u64 = 13; - const HIGH_BPS_SAMPLED_WINDOW_SIZE: usize = 12; + const HIGH_BPS_SAMPLED_WINDOW_SIZE: u64 = 12; const HIGH_BPS: u64 = 4; let tests = vec![ Test { @@ -1399,12 +1398,12 @@ async fn difficulty_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 1; - p.legacy_difficulty_window_size = FULL_WINDOW_SIZE; - p.sampling_activation = ForkActivation::never(); + p.prior_ghostdag_k = 1; + p.prior_difficulty_window_size = FULL_WINDOW_SIZE; + p.crescendo_activation = ForkActivation::never(); // Define past median time so that calls to add_block_with_min_time create blocks // which timestamps fit within the min-max timestamps found in the difficulty window - p.legacy_timestamp_deviation_tolerance = 60; + p.timestamp_deviation_tolerance = 60; }) .build(), }, @@ -1414,15 +1413,17 @@ async fn difficulty_test() { config: ConfigBuilder::new(MAINNET_PARAMS) .skip_proof_of_work() .edit_consensus_params(|p| { - p.ghostdag_k = 1; - p.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE; - 
@@ -1414,15 +1413,17 @@ async fn difficulty_test() {
             config: ConfigBuilder::new(MAINNET_PARAMS)
                 .skip_proof_of_work()
                 .edit_consensus_params(|p| {
-                    p.ghostdag_k = 1;
-                    p.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE;
-                    p.difficulty_sample_rate = SAMPLE_RATE;
-                    p.sampling_activation = ForkActivation::always();
+                    p.prior_ghostdag_k = 1;
+                    p.crescendo.ghostdag_k = 1;
+                    p.crescendo.sampled_difficulty_window_size = SAMPLED_WINDOW_SIZE;
+                    p.crescendo.difficulty_sample_rate = SAMPLE_RATE;
+                    p.crescendo_activation = ForkActivation::always();
+                    p.prior_target_time_per_block = p.crescendo.target_time_per_block;
                     // Define past median time so that calls to add_block_with_min_time create blocks
                     // which timestamps fit within the min-max timestamps found in the difficulty window
-                    p.past_median_time_sample_rate = PMT_SAMPLE_RATE;
-                    p.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE;
-                    p.new_timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE;
+                    p.crescendo.past_median_time_sample_rate = PMT_SAMPLE_RATE;
+                    p.crescendo.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE;
+                    p.timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE;
                 })
                 .build(),
         },
@@ -1432,16 +1433,18 @@ async fn difficulty_test() {
             config: ConfigBuilder::new(MAINNET_PARAMS)
                 .skip_proof_of_work()
                 .edit_consensus_params(|p| {
-                    p.ghostdag_k = 1;
-                    p.target_time_per_block /= HIGH_BPS;
-                    p.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE;
-                    p.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS;
-                    p.sampling_activation = ForkActivation::always();
+                    p.prior_ghostdag_k = 1;
+                    p.crescendo.ghostdag_k = 1;
+                    p.prior_target_time_per_block /= HIGH_BPS;
+                    p.crescendo.sampled_difficulty_window_size = HIGH_BPS_SAMPLED_WINDOW_SIZE;
+                    p.crescendo.difficulty_sample_rate = SAMPLE_RATE * HIGH_BPS;
+                    p.crescendo_activation = ForkActivation::always();
+                    p.prior_target_time_per_block = p.crescendo.target_time_per_block;
                     // Define past median time so that calls to add_block_with_min_time create blocks
                     // which timestamps fit within the min-max timestamps found in the difficulty window
-                    p.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS;
-                    p.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE;
-                    p.new_timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE;
+                    p.crescendo.past_median_time_sample_rate = PMT_SAMPLE_RATE * HIGH_BPS;
+                    p.crescendo.past_median_time_sampled_window_size = PMT_SAMPLED_WINDOW_SIZE;
+                    p.timestamp_deviation_tolerance = PMT_DEVIATION_TOLERANCE;
                 })
                 .build(),
         },
@@ -1452,8 +1455,8 @@ async fn difficulty_test() {
         let consensus = TestConsensus::new(&test.config);
         let wait_handles = consensus.init();

-        let sample_rate = test.config.difficulty_sample_rate(0);
-        let expanded_window_size = test.config.difficulty_window_size(0) * sample_rate as usize;
+        let sample_rate = test.config.difficulty_sample_rate().before();
+        let expanded_window_size = test.config.difficulty_window_size().before() * sample_rate as usize;

         let fake_genesis = Header {
             hash: test.config.genesis.hash,
@@ -1569,7 +1572,7 @@ async fn difficulty_test() {
         for _ in 0..sample_rate {
             if (tip.daa_score + 1) % sample_rate == 0 {
                 // This block should be part of the sampled window
-                let slow_block_time = tip.timestamp + test.config.target_time_per_block * 3;
+                let slow_block_time = tip.timestamp + test.config.prior_target_time_per_block * 3;
                 let slow_block = add_block(&consensus, Some(slow_block_time), vec![tip.hash]).await;
                 tip = slow_block;
                 break;
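Note: the expanded window used above is simply the sampled window size multiplied by the sample rate, so 11 samples taken one block in every 6 span 66 raw blocks. A minimal check of that arithmetic, with the constants copied from the test:

    fn main() {
        // Constants copied from difficulty_test above.
        const SAMPLED_WINDOW_SIZE: u64 = 11;
        const SAMPLE_RATE: u64 = 6;
        // expanded window = sampled window size * sample rate (in raw blocks)
        let expanded_window_size = (SAMPLED_WINDOW_SIZE * SAMPLE_RATE) as usize;
        assert_eq!(expanded_window_size, 66);
    }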
@@ -1668,7 +1671,7 @@ async fn selected_chain_test() {
     let config = ConfigBuilder::new(MAINNET_PARAMS)
         .skip_proof_of_work()
         .edit_consensus_params(|p| {
-            p.min_difficulty_window_len = p.legacy_difficulty_window_size;
+            p.min_difficulty_window_size = p.prior_difficulty_window_size;
         })
         .build();
     let consensus = TestConsensus::new(&config);
@@ -1812,7 +1815,7 @@ async fn run_kip10_activation_test() {
            cfg.params.genesis.hash = genesis_header.hash;
        })
        .edit_consensus_params(|p| {
-            p.kip10_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE);
+            p.crescendo_activation = ForkActivation::new(KIP10_ACTIVATION_DAA_SCORE);
        })
        .build();
@@ -1832,7 +1835,7 @@ async fn run_kip10_activation_test() {
     assert_eq!(consensus.get_virtual_daa_score(), index);

     // Create transaction that attempts to use the KIP-10 opcode
-    let mut spending_tx = Transaction::new(
+    let mut tx = Transaction::new(
         0,
         vec![TransactionInput::new(
             initial_utxo_collection[0].0,
@@ -1846,8 +1849,14 @@ async fn run_kip10_activation_test() {
         0,
         vec![],
     );
-    spending_tx.finalize();
-    let tx_id = spending_tx.id();
+    tx.finalize();
+    let tx_id = tx.id();
+
+    let mut tx = MutableTransaction::from_tx(tx);
+    // This triggers storage mass population
+    let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default());
+    let tx = tx.tx.unwrap_or_clone();
+
     // Test 1: Build empty block, then manually insert invalid tx and verify consensus rejects it
     {
         let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]);
@@ -1857,8 +1866,9 @@ async fn run_kip10_activation_test() {
             consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]);

         // Insert our test transaction and recalculate block hashes
-        block.transactions.push(spending_tx.clone());
-        block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false);
+        block.transactions.push(tx.clone());
+        block.header.hash_merkle_root =
+            calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score));
         let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await;
         assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain)));
         assert_eq!(consensus.lkg_virtual_state.load().daa_score, 2);
@@ -1869,7 +1879,7 @@ async fn run_kip10_activation_test() {
     index += 1;

     // Test 2: Verify the same transaction is accepted after activation
-    let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![spending_tx.clone()]).await;
+    let status = consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.clone()]).await;
     assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));
     assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id));
 }
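Note: the hunks in these tests collapse the per-feature activations (kip10_activation, payload_activation, runtime_sig_op_counting) into the single crescendo_activation and gate behavior, such as committing mass in the merkle root, on whether the fork is active at the block's DAA score. The constructors and is_active appear in the diff itself; the internal representation in the stand-in below is an assumption for illustration only.

    // Simplified stand-in for a DAA-score-gated fork activation; illustration only.
    #[derive(Clone, Copy, Debug)]
    struct ForkActivation(u64);

    impl ForkActivation {
        const fn new(activation_daa_score: u64) -> Self {
            Self(activation_daa_score)
        }
        const fn always() -> Self {
            Self(0)
        }
        const fn never() -> Self {
            Self(u64::MAX)
        }
        fn is_active(self, current_daa_score: u64) -> bool {
            current_daa_score >= self.0
        }
    }

    fn main() {
        let activation = ForkActivation::new(3);
        // Below the activation score the old rules apply (e.g. mass is not committed in the merkle root).
        assert!(!activation.is_active(2));
        // From the activation score onward the new rules apply.
        assert!(activation.is_active(3));
        assert!(ForkActivation::always().is_active(0));
        assert!(!ForkActivation::never().is_active(u64::MAX - 1));
    }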
@@ -1880,7 +1890,7 @@ async fn payload_test() {
         .skip_proof_of_work()
         .edit_consensus_params(|p| {
             p.coinbase_maturity = 0;
-            p.payload_activation = ForkActivation::always()
+            p.crescendo_activation = ForkActivation::always()
         })
         .build();
     let consensus = TestConsensus::new(&config);
@@ -1890,22 +1900,38 @@ async fn payload_test() {
     let b = consensus.build_utxo_valid_block_with_parents(1.into(), vec![config.genesis.hash], miner_data.clone(), vec![]);
     consensus.validate_and_insert_block(b.to_immutable()).virtual_state_task.await.unwrap();
     let funding_block = consensus.build_utxo_valid_block_with_parents(2.into(), vec![1.into()], miner_data, vec![]);
-    let cb_id = {
+    let (cb_id, cb_amount) = {
         let mut cb = funding_block.transactions[0].clone();
         cb.finalize();
-        cb.id()
+        (cb.id(), cb.outputs[0].value)
     };
+
     consensus.validate_and_insert_block(funding_block.to_immutable()).virtual_state_task.await.unwrap();
-    let tx = Transaction::new(
+    let mut txx = Transaction::new(
         0,
         vec![TransactionInput::new(TransactionOutpoint { transaction_id: cb_id, index: 0 }, vec![], 0, 0)],
-        vec![TransactionOutput::new(1, ScriptPublicKey::default())],
+        vec![TransactionOutput::new(cb_amount / 2, ScriptPublicKey::default())],
         0,
         SubnetworkId::default(),
         0,
-        vec![0; (config.params.max_block_mass / 2) as usize],
+        vec![0; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize],
     );
-    consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx]).await.unwrap();
+
+    // Create a tx with transient mass over the block limit
+    txx.payload = vec![0; (config.params.max_block_mass / 2) as usize];
+    let mut tx = MutableTransaction::from_tx(txx.clone());
+    // This triggers storage mass population
+    consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap();
+    let consensus_res = consensus.add_utxo_valid_block_with_parents(4.into(), vec![2.into()], vec![tx.tx.unwrap_or_clone()]).await;
+    assert_match!(consensus_res, Err(RuleError::ExceedsTransientMassLimit(_, _)));
+
+    // Fix the payload to be below the limit
+    txx.payload = vec![0; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize];
+    let mut tx = MutableTransaction::from_tx(txx.clone());
+    // This triggers storage mass population
+    consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default()).unwrap();
+    let status = consensus.add_utxo_valid_block_with_parents(3.into(), vec![2.into()], vec![tx.tx.unwrap_or_clone()]).await;
+    assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));

     consensus.shutdown(wait_handles);
 }
@@ -1943,7 +1969,7 @@ async fn payload_activation_test() {
            cfg.params.genesis.hash = genesis_header.hash;
        })
        .edit_consensus_params(|p| {
-            p.payload_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE);
+            p.crescendo_activation = ForkActivation::new(PAYLOAD_ACTIVATION_DAA_SCORE);
        })
        .build();
@@ -1963,7 +1989,7 @@ async fn payload_activation_test() {
     assert_eq!(consensus.get_virtual_daa_score(), index);

     // Create transaction with large payload
-    let large_payload = vec![0u8; (config.params.max_block_mass / 2) as usize];
+    let large_payload = vec![0u8; (config.params.max_block_mass / TRANSIENT_BYTE_TO_MASS_FACTOR / 2) as usize];
     let mut tx_with_payload = Transaction::new(
         0,
         vec![TransactionInput::new(
@@ -1989,10 +2015,15 @@ async fn payload_activation_test() {
     let mut block =
         consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]);

+    let mut tx = MutableTransaction::from_tx(tx_with_payload.clone());
+    // This triggers storage mass population
+    let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default());
+
     // Insert our test transaction and recalculate block hashes
-    block.transactions.push(tx_with_payload.clone());
+    block.transactions.push(tx.tx.unwrap_or_clone());

-    block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false);
+    block.header.hash_merkle_root =
+        calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score));
     let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await;
     assert!(matches!(block_status, Err(RuleError::TxInContextFailed(tx, TxRuleError::NonCoinbaseTxHasPayload)) if tx == tx_id));
     assert_eq!(consensus.lkg_virtual_state.load().daa_score, PAYLOAD_ACTIVATION_DAA_SCORE - 1);
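Note: several hunks in this file repeat the same three steps before placing a transaction in a block: wrap it in a MutableTransaction, run it through validate_mempool_transaction so the calculated masses get populated, then unwrap it back. The toy model below mirrors only the shape of that flow; its types and mass rule are made up for illustration and are not the consensus implementation.

    // Toy model only: wrap -> validate (populates mass) -> unwrap, as used by the tests in this file.
    #[derive(Clone, Debug, PartialEq)]
    struct Tx {
        payload: Vec<u8>,
    }

    struct MutableTx {
        tx: Tx,
        calculated_mass: Option<u64>,
    }

    impl MutableTx {
        fn from_tx(tx: Tx) -> Self {
            Self { tx, calculated_mass: None }
        }
    }

    // Stand-in for validate_mempool_transaction: validation fills the mass in as a side effect.
    fn validate(mtx: &mut MutableTx) -> Result<(), String> {
        // Hypothetical mass rule, purely for illustration.
        mtx.calculated_mass = Some(mtx.tx.payload.len() as u64);
        Ok(())
    }

    fn main() {
        let mut mtx = MutableTx::from_tx(Tx { payload: vec![0; 64] });
        assert!(mtx.calculated_mass.is_none());
        validate(&mut mtx).unwrap(); // populate masses before committing the tx to a block
        assert_eq!(mtx.calculated_mass, Some(64));
        let tx = mtx.tx; // unwrap back to the plain transaction for block inclusion
        assert_eq!(tx.payload.len(), 64);
    }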
@@ -2003,9 +2034,13 @@
     consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![(index - 1).into()], vec![]).await.unwrap();
     index += 1;

+    let mut tx = MutableTransaction::from_tx(tx_with_payload.clone());
+    // This triggers storage mass population
+    let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default());
+
     // Test 2: Verify the same transaction is accepted after activation
     let status =
-        consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx_with_payload.clone()]).await;
+        consensus.add_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], vec![tx.tx.unwrap_or_clone()]).await;
     assert!(matches!(status, Ok(BlockStatus::StatusUTXOValid)));
     assert!(consensus.lkg_virtual_state.load().accepted_tx_ids.contains(&tx_id));
@@ -2067,7 +2102,7 @@ async fn runtime_sig_op_counting_test() {
            cfg.params.genesis.hash = genesis_header.hash;
        })
        .edit_consensus_params(|p| {
-            p.runtime_sig_op_counting = ForkActivation::new(RUNTIME_SIGOP_ACTIVATION_DAA_SCORE);
+            p.crescendo_activation = ForkActivation::new(RUNTIME_SIGOP_ACTIVATION_DAA_SCORE);
        })
        .build();
@@ -2120,13 +2155,19 @@ async fn runtime_sig_op_counting_test() {
     tx.finalize();

+    let mut tx = MutableTransaction::from_tx(tx);
+    // This triggers storage mass population
+    let _ = consensus.validate_mempool_transaction(&mut tx, &TransactionValidationArgs::default());
+    let tx = tx.tx.unwrap_or_clone();
+
     // Test 1: Before activation, tx should be rejected due to static sig op counting (sees 3 ops)
     {
         let miner_data = MinerData::new(ScriptPublicKey::from_vec(0, vec![]), vec![]);
         let mut block =
             consensus.build_utxo_valid_block_with_parents((index + 1).into(), vec![index.into()], miner_data.clone(), vec![]);

         block.transactions.push(tx.clone());
-        block.header.hash_merkle_root = calc_hash_merkle_root(block.transactions.iter(), false);
+        block.header.hash_merkle_root =
+            calc_hash_merkle_root(block.transactions.iter(), config.crescendo_activation.is_active(block.header.daa_score));
         let block_status = consensus.validate_and_insert_block(block.to_immutable()).virtual_state_task.await;
         assert!(matches!(block_status, Ok(BlockStatus::StatusDisqualifiedFromChain)));
         index += 1;
diff --git a/testing/integration/src/consensus_pipeline_tests.rs b/testing/integration/src/consensus_pipeline_tests.rs
index 0b252c3813..a6dc387133 100644
--- a/testing/integration/src/consensus_pipeline_tests.rs
+++ b/testing/integration/src/consensus_pipeline_tests.rs
@@ -94,7 +94,7 @@ async fn test_concurrent_pipeline_random() {
     let mut tips = vec![genesis];
     let mut total = 1000i64;
     while total > 0 {
-        let v = min(config.max_block_parents as i64, poi.sample(&mut thread_rng) as i64);
+        let v = min(config.prior_max_block_parents as i64, poi.sample(&mut thread_rng) as i64);
         if v == 0 {
             continue;
         }
diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs
index 00d9b78032..a8bed754fb 100644
--- a/testing/integration/src/mempool_benchmarks.rs
+++ b/testing/integration/src/mempool_benchmarks.rs
@@ -105,7 +105,7 @@ async fn bench_bbt_latency() {
     let bbt_client = daemon.new_client().await;

     // The time interval between Poisson(lambda) events distributes ~Exp(lambda)
-    let dist: Exp<f64> = Exp::new(params.bps() as f64).unwrap();
+    let dist: Exp<f64> = Exp::new(params.bps().upper_bound() as f64).unwrap();
     let comm_delay = 1000;

     // Mining key and address
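Note: the comment in the hunk above relies on the fact that inter-arrival times of a Poisson(lambda) process are Exp(lambda)-distributed, so the benchmark spaces block submissions by exponential delays with rate equal to the (upper-bound) bps. A small, self-contained sketch of that sampling, using the rand and rand_distr crates (the Exp type in the benchmark presumably comes from rand_distr):

    use rand::thread_rng;
    use rand_distr::{Distribution, Exp};

    fn main() {
        // With ~10 blocks per second the mean gap between block events is ~0.1s.
        let bps = 10.0_f64;
        let dist: Exp<f64> = Exp::new(bps).unwrap();
        let mut rng = thread_rng();
        let gaps: Vec<f64> = (0..5).map(|_| dist.sample(&mut rng)).collect();
        let mean = gaps.iter().sum::<f64>() / gaps.len() as f64;
        println!("sampled inter-block gaps (s): {gaps:?}, mean ~= {mean:.3}");
    }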
@@ -347,8 +347,15 @@ async fn bench_bbt_latency_2() {
         .launch()
         .await
         .task(
-            MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal)
-                .await,
+            MinerGroupTask::build(
+                network,
+                client_manager.clone(),
+                SUBMIT_BLOCK_CLIENTS,
+                params.bps().upper_bound(),
+                BLOCK_COUNT,
+                Stopper::Signal,
+            )
+            .await,
         )
         .task(
             TxSenderGroupTask::build(
diff --git a/testing/integration/src/subscribe_benchmarks.rs b/testing/integration/src/subscribe_benchmarks.rs
index 8efefd8427..367495eaa9 100644
--- a/testing/integration/src/subscribe_benchmarks.rs
+++ b/testing/integration/src/subscribe_benchmarks.rs
@@ -231,8 +231,15 @@ async fn utxos_changed_subscriptions_client(address_cycle_seconds: u64, address_
         .task(TickTask::build(tick_service.clone()))
         .task(MemoryMonitorTask::build(tick_service.clone(), "client", Duration::from_secs(5), MAX_MEMORY))
         .task(
-            MinerGroupTask::build(network, client_manager.clone(), SUBMIT_BLOCK_CLIENTS, params.bps(), BLOCK_COUNT, Stopper::Signal)
-                .await,
+            MinerGroupTask::build(
+                network,
+                client_manager.clone(),
+                SUBMIT_BLOCK_CLIENTS,
+                params.bps().upper_bound(),
+                BLOCK_COUNT,
+                Stopper::Signal,
+            )
+            .await,
         )
         .task(
             TxSenderGroupTask::build(
@@ -250,7 +257,7 @@ async fn utxos_changed_subscriptions_client(address_cycle_seconds: u64, address_
             SubscriberGroupTask::build(
                 client_manager,
                 SUBSCRIBE_WORKERS,
-                params.bps(),
+                params.bps().upper_bound(),
                 vec![VirtualDaaScoreChangedScope {}.into()],
                 3,
                 subscribing_addresses,
diff --git a/wallet/core/src/tx/generator/test.rs b/wallet/core/src/tx/generator/test.rs
index e1db97c446..bf9c71f9d4 100644
--- a/wallet/core/src/tx/generator/test.rs
+++ b/wallet/core/src/tx/generator/test.rs
@@ -140,7 +140,7 @@ impl GeneratorExtension for Generator {

 fn test_network_id() -> NetworkId {
     // TODO make this configurable
-    NetworkId::with_suffix(NetworkType::Testnet, 11)
+    NetworkId::with_suffix(NetworkType::Testnet, 10)
 }

 #[derive(Default)]
diff --git a/wallet/core/src/utxo/test.rs b/wallet/core/src/utxo/test.rs
index a1b41f9987..516383aab8 100644
--- a/wallet/core/src/utxo/test.rs
+++ b/wallet/core/src/utxo/test.rs
@@ -22,7 +22,7 @@ async fn test_utxo_subsystem_bootstrap() -> Result<()> {

 #[test]
 fn test_utxo_generator_empty_utxo_noop() -> Result<()> {
-    let network_id = NetworkId::with_suffix(NetworkType::Testnet, 11);
+    let network_id = NetworkId::with_suffix(NetworkType::Testnet, 10);
     let output_address = output_address(network_id.into());
     let payment_output = PaymentOutput::new(output_address, kaspa_to_sompi(2.0));