From b8febf47fae4d996e716aadf3b963dd689ff5b83 Mon Sep 17 00:00:00 2001 From: sanlee42 <hit.cs.lijun@gmail.com> Date: Sat, 18 Nov 2023 04:01:49 +0000 Subject: [PATCH] Fix compile error --- Cargo.lock | 54 +- Cargo.toml | 2 - chain/src/chain.rs | 18 +- consensus/src/dag/blockdag.rs | 10 +- flexidag/Cargo.toml | 28 +- flexidag/service/Cargo.toml | 41 -- flexidag/service/src/lib.rs | 1 - flexidag/service/src/service.rs | 263 ------- flexidag/src/blockdag.rs | 260 ------- flexidag/src/consensusdb/access.rs | 199 ----- flexidag/src/consensusdb/cache.rs | 44 -- .../src/consensusdb/consensus_ghostdag.rs | 512 ------------- flexidag/src/consensusdb/consensus_header.rs | 216 ------ .../src/consensusdb/consensus_reachability.rs | 540 -------------- .../src/consensusdb/consensus_relations.rs | 316 -------- flexidag/src/consensusdb/db.rs | 149 ---- flexidag/src/consensusdb/error.rs | 58 -- flexidag/src/consensusdb/item.rs | 81 --- flexidag/src/consensusdb/mod.rs | 31 - flexidag/src/consensusdb/schema.rs | 40 - flexidag/src/consensusdb/writer.rs | 75 -- flexidag/src/dag/ghostdag/mergeset.rs | 71 -- flexidag/src/dag/ghostdag/mod.rs | 4 - flexidag/src/dag/ghostdag/protocol.rs | 338 --------- flexidag/src/dag/ghostdag/util.rs | 57 -- flexidag/src/dag/mod.rs | 3 - flexidag/src/dag/reachability/extensions.rs | 50 -- flexidag/src/dag/reachability/inquirer.rs | 345 --------- flexidag/src/dag/reachability/mod.rs | 50 -- .../dag/reachability/reachability_service.rs | 315 -------- flexidag/src/dag/reachability/reindex.rs | 684 ------------------ .../src/dag/reachability/relations_service.rs | 34 - flexidag/src/dag/reachability/tests.rs | 264 ------- flexidag/src/dag/reachability/tree.rs | 161 ----- flexidag/src/dag/types/ghostdata.rs | 147 ---- flexidag/src/dag/types/interval.rs | 377 ---------- flexidag/src/dag/types/mod.rs | 6 - flexidag/src/dag/types/ordering.rs | 36 - flexidag/src/dag/types/perf.rs | 51 -- flexidag/src/dag/types/reachability.rs | 26 - flexidag/src/dag/types/trusted.rs | 26 - flexidag/src/flexidag_service.rs | 568 +++++++++++++++ flexidag/src/lib.rs | 24 +- .../block_connector_service.rs | 2 +- sync/src/tasks/block_sync_task.rs | 4 +- 45 files changed, 602 insertions(+), 5979 deletions(-) delete mode 100644 flexidag/service/Cargo.toml delete mode 100644 flexidag/service/src/lib.rs delete mode 100644 flexidag/service/src/service.rs delete mode 100644 flexidag/src/blockdag.rs delete mode 100644 flexidag/src/consensusdb/access.rs delete mode 100644 flexidag/src/consensusdb/cache.rs delete mode 100644 flexidag/src/consensusdb/consensus_ghostdag.rs delete mode 100644 flexidag/src/consensusdb/consensus_header.rs delete mode 100644 flexidag/src/consensusdb/consensus_reachability.rs delete mode 100644 flexidag/src/consensusdb/consensus_relations.rs delete mode 100644 flexidag/src/consensusdb/db.rs delete mode 100644 flexidag/src/consensusdb/error.rs delete mode 100644 flexidag/src/consensusdb/item.rs delete mode 100644 flexidag/src/consensusdb/mod.rs delete mode 100644 flexidag/src/consensusdb/schema.rs delete mode 100644 flexidag/src/consensusdb/writer.rs delete mode 100644 flexidag/src/dag/ghostdag/mergeset.rs delete mode 100644 flexidag/src/dag/ghostdag/mod.rs delete mode 100644 flexidag/src/dag/ghostdag/protocol.rs delete mode 100644 flexidag/src/dag/ghostdag/util.rs delete mode 100644 flexidag/src/dag/mod.rs delete mode 100644 flexidag/src/dag/reachability/extensions.rs delete mode 100644 flexidag/src/dag/reachability/inquirer.rs delete mode 100644 flexidag/src/dag/reachability/mod.rs delete mode 
100644 flexidag/src/dag/reachability/reachability_service.rs delete mode 100644 flexidag/src/dag/reachability/reindex.rs delete mode 100644 flexidag/src/dag/reachability/relations_service.rs delete mode 100644 flexidag/src/dag/reachability/tests.rs delete mode 100644 flexidag/src/dag/reachability/tree.rs delete mode 100644 flexidag/src/dag/types/ghostdata.rs delete mode 100644 flexidag/src/dag/types/interval.rs delete mode 100644 flexidag/src/dag/types/mod.rs delete mode 100644 flexidag/src/dag/types/ordering.rs delete mode 100644 flexidag/src/dag/types/perf.rs delete mode 100644 flexidag/src/dag/types/reachability.rs delete mode 100644 flexidag/src/dag/types/trusted.rs create mode 100644 flexidag/src/flexidag_service.rs diff --git a/Cargo.lock b/Cargo.lock index c2b978862e..cfd552cd93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2813,40 +2813,6 @@ dependencies = [ "miniz_oxide", ] -[[package]] -name = "flexidag-service" -version = "1.13.7" -dependencies = [ - "anyhow", - "bcs-ext", - "bincode", - "byteorder", - "futures 0.3.26", - "hex", - "itertools", - "once_cell", - "parking_lot 0.12.1", - "proptest", - "proptest-derive", - "rand 0.8.5", - "rand_core 0.6.4", - "rocksdb", - "rust-argon2", - "serde 1.0.152", - "sha3", - "starcoin-chain-api", - "starcoin-config", - "starcoin-crypto", - "starcoin-flexidag", - "starcoin-logger", - "starcoin-state-api", - "starcoin-storage", - "starcoin-time-service", - "starcoin-types", - "starcoin-vm-types", - "thiserror", -] - [[package]] name = "fnv" version = "1.0.7" @@ -9737,33 +9703,15 @@ version = "1.13.7" dependencies = [ "anyhow", "async-trait", - "bcs-ext", - "bincode", - "byteorder", "futures 0.3.26", - "hex", - "itertools", - "once_cell", - "parking_lot 0.12.1", - "proptest", - "proptest-derive", - "rand 0.8.5", - "rand_core 0.6.4", - "rocksdb", - "rust-argon2", - "serde 1.0.152", - "sha3", "starcoin-accumulator", - "starcoin-chain-api", "starcoin-config", + "starcoin-consensus", "starcoin-crypto", "starcoin-logger", "starcoin-service-registry", - "starcoin-state-api", "starcoin-storage", - "starcoin-time-service", "starcoin-types", - "starcoin-vm-types", "thiserror", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 6fab846e5b..bd16956c88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,7 +112,6 @@ members = [ "cmd/db-exporter", "cmd/genesis-nft-miner", "flexidag", - "flexidag/service" ] default-members = [ @@ -220,7 +219,6 @@ default-members = [ "cmd/miner_client/api", "cmd/db-exporter", "flexidag", - "flexidag/service", ] [profile.dev] diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e577128e52..17174a9a7a 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -14,8 +14,7 @@ use starcoin_chain_api::{ ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField, }; use starcoin_config::ChainNetworkID; -use starcoin_consensus::dag::types::ghostdata::GhostdagData; -use starcoin_consensus::{BlockDAG, Consensus, FlexiDagStorage}; +use starcoin_consensus::Consensus; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::HashValue; use starcoin_executor::VMMetrics; @@ -31,7 +30,6 @@ use starcoin_types::block::BlockIdAndNumber; use starcoin_types::contract_event::ContractEventInfo; use starcoin_types::dag_block::KTotalDifficulty; use starcoin_types::filter::Filter; -use starcoin_types::header::DagHeader; use starcoin_types::startup_info::{ChainInfo, ChainStatus}; use starcoin_types::transaction::RichTransactionInfo; use starcoin_types::{ @@ -47,7 +45,6 @@ use 
starcoin_vm_types::account_config::genesis_address; use starcoin_vm_types::genesis_config::ConsensusStrategy; use starcoin_vm_types::on_chain_resource::Epoch; use std::cmp::min; -use std::collections::BTreeSet; use std::iter::Extend; use std::option::Option::{None, Some}; use std::{collections::HashMap, sync::Arc}; @@ -107,19 +104,6 @@ impl BlockChain { .ok_or_else(|| format_err!("Can not find genesis hash in storage."))?; let head_id = head_block.id(); watch(CHAIN_WATCH_NAME, "n1253"); - - // let dag_accumulator = match storage.get_dag_accumulator_info(head_id)? { - // Some(accmulator_info) => Some(info_2_accumulator( - // accmulator_info, - // AccumulatorStoreType::SyncDag, - // storage.as_ref(), - // )), - // None => None, - // }; - // let dag_snapshot_tips = storage - // .get_accumulator_snapshot_storage() - // .get(head_id)? - // .map(|snapshot| snapshot.child_hashes); let mut chain = Self { genesis_hash: genesis, time_service, diff --git a/consensus/src/dag/blockdag.rs b/consensus/src/dag/blockdag.rs index 9398592aac..1c3d1816ea 100644 --- a/consensus/src/dag/blockdag.rs +++ b/consensus/src/dag/blockdag.rs @@ -18,11 +18,11 @@ use starcoin_accumulator::accumulator_info::AccumulatorInfo; use starcoin_accumulator::node::AccumulatorStoreType; use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_config::{NodeConfig, RocksdbConfig}; -use starcoin_crypto::HashValue as Hash; +use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_storage::flexi_dag::SyncFlexiDagSnapshotHasher; use starcoin_storage::storage::CodecKVStore; use starcoin_storage::{BlockStore, Storage, Store, SyncFlexiDagStore}; -use starcoin_types::block::BlockNumber; +use starcoin_types::block::{BlockHeader, BlockNumber}; use starcoin_types::dag_block::KTotalDifficulty; use starcoin_types::startup_info; use starcoin_types::{ @@ -239,6 +239,12 @@ impl BlockDAG { ) -> Result<(), StoreError> { self.storage.relations_store.insert(child, parents) } + + + pub fn get_ghostdag_data_by_child(&self, hash: Hash) -> anyhow::Result<Arc<GhostdagData>> { + let ghostdata = self.storage.ghost_dag_store.get_data(hash)?; + return Ok(ghostdata); + } } #[cfg(test)] diff --git a/flexidag/Cargo.toml b/flexidag/Cargo.toml index 999bca15f4..79d1439fa4 100644 --- a/flexidag/Cargo.toml +++ b/flexidag/Cargo.toml @@ -9,36 +9,20 @@ homepage = { workspace = true } repository = { workspace = true } rust-version = { workspace = true } +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -byteorder = { workspace = true } - futures = { workspace = true } -hex = { default-features = false, workspace = true } -once_cell = { workspace = true } -proptest = { default-features = false, optional = true, workspace = true } -proptest-derive = { default-features = false, optional = true, workspace = true } -rand = { workspace = true } -rand_core = { default-features = false, workspace = true } -rust-argon2 = { workspace = true } -sha3 = { workspace = true } -starcoin-chain-api = { workspace = true } +starcoin-config = { workspace = true } starcoin-crypto = { workspace = true } starcoin-logger = { workspace = true } starcoin-service-registry = { workspace = true } -starcoin-state-api = { workspace = true } -starcoin-time-service = { workspace = true } +starcoin-storage = { workspace = true } starcoin-types = { workspace = true } -starcoin-vm-types = { workspace = true } tokio = { workspace = true } +starcoin-consensus 
= { workspace = true } starcoin-accumulator = { workspace = true } thiserror = { workspace = true } -rocksdb = { workspace = true } -bincode = { workspace = true } -serde = { workspace = true } -starcoin-storage = { workspace = true } -parking_lot = { workspace = true } -itertools = { workspace = true } -starcoin-config = { workspace = true } -bcs-ext = { workspace = true } + diff --git a/flexidag/service/Cargo.toml b/flexidag/service/Cargo.toml deleted file mode 100644 index 96b5a0cf2d..0000000000 --- a/flexidag/service/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -authors = { workspace = true } -edition = { workspace = true } -license = { workspace = true } -name = "flexidag-service" -publish = { workspace = true } -version = "1.13.7" -homepage = { workspace = true } -repository = { workspace = true } -rust-version = { workspace = true } - -[dependencies] -anyhow = { workspace = true } -byteorder = { workspace = true } - -futures = { workspace = true } -hex = { default-features = false, workspace = true } -once_cell = { workspace = true } -proptest = { default-features = false, optional = true, workspace = true } -proptest-derive = { default-features = false, optional = true, workspace = true } -rand = { workspace = true } -rand_core = { default-features = false, workspace = true } -rust-argon2 = { workspace = true } -sha3 = { workspace = true } -starcoin-chain-api = { workspace = true } -starcoin-crypto = { workspace = true } -starcoin-logger = { workspace = true } -starcoin-state-api = { workspace = true } -starcoin-time-service = { workspace = true } -starcoin-types = { workspace = true } -starcoin-vm-types = { workspace = true } -thiserror = { workspace = true } -rocksdb = { workspace = true } -bincode = { workspace = true } -serde = { workspace = true } -starcoin-storage = { workspace = true } -parking_lot = { workspace = true } -itertools = { workspace = true } -starcoin-config = { workspace = true } -bcs-ext = { workspace = true } -starcoin-flexidag = {workspace = true} \ No newline at end of file diff --git a/flexidag/service/src/lib.rs b/flexidag/service/src/lib.rs deleted file mode 100644 index 724c651f21..0000000000 --- a/flexidag/service/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -mod service; diff --git a/flexidag/service/src/service.rs b/flexidag/service/src/service.rs deleted file mode 100644 index 7e5ed771f7..0000000000 --- a/flexidag/service/src/service.rs +++ /dev/null @@ -1,263 +0,0 @@ -use anyhow::{anyhow, bail, Ok}; -use flexidag::consensusdb::schemadb::GhostdagStoreReader; -use flexidag::consensusdb::{ - prelude::FlexiDagStorage, - schemadb::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, GhostdagStore, - HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, - }, -}; -use flexidag::dag::ghostdag::protocol::GhostdagManager; -use flexidag::dag::reachability::{inquirer, reachability_service::MTReachabilityService}; -use flexidag::dag::types::ghostdata::GhostdagData; -use flexidag::StoreError; -use parking_lot::RwLock; -use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_types::startup_info::StartupInfo; -use starcoin_types::{ - blockhash::{BlockHashes, KType, ORIGIN}, - consensus_header::{ConsensusHeader, DagHeader}, -}; -use std::collections::HashMap; -use std::collections::HashSet; -use std::sync::Arc; - -pub type DbGhostdagManager = GhostdagManager< - DbGhostdagStore, - DbRelationsStore, - MTReachabilityService<DbReachabilityStore>, - DbHeadersStore, ->; - -#[derive(Clone)] -pub struct FlexiDAG { - 
genesis_hash: Hash, - ghostdag_manager: DbGhostdagManager, - relations_store: DbRelationsStore, - reachability_store: DbReachabilityStore, - ghostdag_store: DbGhostdagStore, - header_store: DbHeadersStore, - /// orphan blocks, parent hash -> orphan block - missing_blocks: HashMap<Hash, HashSet<DagHeader>>, - tips: Vec<HashValue>, - startup_info: StartupInfo, - storage: Storage, -} - -impl FlexiDAG { - pub fn new(genesis_hash: Hash, k: KType, db: FlexiDagStorage) -> Self { - let ghostdag_store = db.ghost_dag_store.clone(); - let header_store = db.header_store.clone(); - let relations_store = db.relations_store.clone(); - let mut reachability_store = db.reachability_store; - inquirer::init(&mut reachability_store).unwrap(); - let reachability_service = - MTReachabilityService::new(Arc::new(RwLock::new(reachability_store.clone()))); - let ghostdag_manager = DbGhostdagManager::new( - genesis_hash, - k, - ghostdag_store.clone(), - relations_store.clone(), - header_store.clone(), - reachability_service, - ); - - let mut dag = Self { - genesis_hash, - ghostdag_manager, - relations_store, - reachability_store, - ghostdag_store, - header_store, - missing_blocks: HashMap::new(), - }; - dag - } - - pub fn clear_missing_block(&mut self) { - self.missing_blocks.clear(); - } - - pub fn init_with_genesis(&mut self, genesis: DagHeader) -> anyhow::Result<()> { - if self.relations_store.has(Hash::new(ORIGIN))? { - return Err(anyhow!("Already init with genesis")); - }; - self.relations_store - .insert(Hash::new(ORIGIN), BlockHashes::new(vec![])) - .unwrap(); - let _ = self.addToDag(genesis); - Ok(()) - } - - pub fn addToDag(&mut self, header: DagHeader) -> anyhow::Result<GhostdagData> { - //TODO:check genesis - // Generate ghostdag data - let parents_hash = header.parents_hash(); - let ghostdag_data = if header.hash() != self.genesis_hash { - self.ghostdag_manager.ghostdag(parents_hash) - } else { - self.ghostdag_manager.genesis_ghostdag_data() - }; - // Store ghostdata - self.ghostdag_store - .insert(header.hash(), Arc::new(ghostdag_data.clone())) - .unwrap(); - - // Update reachability store - let mut reachability_store = self.reachability_store.clone(); - let mut merge_set = ghostdag_data - .unordered_mergeset_without_selected_parent() - .filter(|hash| self.reachability_store.has(*hash).unwrap()); - - inquirer::add_block( - &mut reachability_store, - header.hash(), - ghostdag_data.selected_parent, - &mut merge_set, - )?; - - // store relations - self.relations_store - .insert(header.hash(), BlockHashes::new(parents_hash.to_vec()))?; - // Store header store - let _ = self - .header_store - .insert(header.hash(), Arc::new(header.to_owned()), 0)?; - return Ok(ghostdag_data.clone()); - } - fn is_in_dag(&self, _hash: Hash) -> anyhow::Result<bool> { - return Ok(true); - } - pub fn verify_header(&self, _header: &DagHeader) -> anyhow::Result<()> { - //TODO: implemented it - Ok(()) - } - - pub fn connect_block(&mut self, header: DagHeader) -> anyhow::Result<()> { - let _ = self.verify_header(&header)?; - let is_orphan_block = self.update_orphans(&header)?; - if is_orphan_block { - return Ok(()); - } - self.addToDag(header.clone()); - self.check_missing_block(header)?; - Ok(()) - } - - pub fn check_missing_block(&mut self, header: DagHeader) -> anyhow::Result<()> { - if let Some(orphans) = self.missing_blocks.remove(&header.hash()) { - for orphan in orphans.iter() { - let is_orphan = self.is_orphan(&orphan)?; - if !is_orphan { - self.addToDag(header.clone()); - } - } - } - Ok(()) - } - fn is_orphan(&self, header: 
&DagHeader) -> anyhow::Result<bool> { - for parent in header.parents_hash() { - if !self.is_in_dag(parent.to_owned())? { - return Ok(false); - } - } - return Ok(true); - } - pub fn get_ghostdag_data(&self, hash: Hash) -> anyhow::Result<Arc<GhostdagData>> { - let ghostdata = self.ghostdag_store.get_data(hash)?; - return Ok(ghostdata); - } - - fn update_orphans(&mut self, block_header: &DagHeader) -> anyhow::Result<bool> { - let mut is_orphan = false; - for parent in block_header.parents_hash() { - if self.is_in_dag(parent.to_owned())? { - continue; - } - if !self - .missing_blocks - .entry(parent.to_owned()) - .or_insert_with(HashSet::new) - .insert(block_header.to_owned()) - { - return Err(anyhow::anyhow!("Block already processed as a orphan")); - } - is_orphan = true; - } - Ok(is_orphan) - } - - pub fn get_block_header(&self, hash: Hash) -> anyhow::Result<DagHeader> { - match self.header_store.get_header(hash) { - anyhow::Result::Ok(header) => anyhow::Result::Ok(header), - Err(error) => { - println!("failed to get header by hash: {}", error.to_string()); - bail!("failed to get header by hash: {}", error.to_string()); - } - } - } - - pub fn get_parents(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { - match self.relations_store.get_parents(hash) { - anyhow::Result::Ok(parents) => anyhow::Result::Ok((*parents).clone()), - Err(error) => { - println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } - - pub fn get_children(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { - match self.relations_store.get_children(hash) { - anyhow::Result::Ok(children) => anyhow::Result::Ok((*children).clone()), - Err(error) => { - println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } - - // for testing - pub fn push_parent_children( - &mut self, - child: Hash, - parents: Arc<Vec<Hash>>, - ) -> Result<(), StoreError> { - self.relations_store.insert(child, parents) - } - - pub fn get_genesis_hash(&self) -> Hash { - self.genesis_hash - } -} - -#[cfg(test)] -mod tests { - use super::*; - use flexidag::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; - use starcoin_types::block::BlockHeader; - use std::{env, fs}; - - #[test] - fn base_test() { - let genesis = DagHeader::new_genesis(BlockHeader::random()); - let genesis_hash = genesis.hash(); - let k = 16; - let db_path = env::temp_dir().join("smolstc"); - println!("db path:{}", db_path.to_string_lossy()); - if db_path - .as_path() - .try_exists() - .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) - { - fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); - } - let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); - let db = FlexiDagStorage::create_from_path(db_path, config) - .expect("Failed to create flexidag storage"); - let mut dag = FlexiDAG::new(genesis_hash, k, db); - dag.init_with_genesis(genesis); - let block = DagHeader::new(BlockHeader::random(), vec![genesis_hash]); - dag.addToDag(block); - } -} diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs deleted file mode 100644 index 4c9b979664..0000000000 --- a/flexidag/src/blockdag.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::consensusdb::prelude::StoreError; -use crate::consensusdb::schemadb::GhostdagStoreReader; -use crate::consensusdb::{ - prelude::FlexiDagStorage, - schemadb::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, 
DbRelationsStore, GhostdagStore, - HeaderStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, - }, -}; -use crate::dag::ghostdag::protocol::{ColoringOutput, GhostdagManager}; -use crate::dag::reachability::{inquirer, reachability_service::MTReachabilityService}; -use crate::dag::types::ghostdata::GhostdagData; -use anyhow::{anyhow, bail, Ok}; -use parking_lot::RwLock; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::{BlockHashes, KType, ORIGIN}, - consensus_header::{ConsensusHeader, DagHeader}, -}; -use std::collections::HashMap; -use std::collections::HashSet; -use std::sync::Arc; - -pub type DbGhostdagManager = GhostdagManager< - DbGhostdagStore, - DbRelationsStore, - MTReachabilityService<DbReachabilityStore>, - DbHeadersStore, ->; - -#[derive(Clone)] -pub struct BlockDAG { - genesis_hash: Hash, - ghostdag_manager: DbGhostdagManager, - relations_store: DbRelationsStore, - reachability_store: DbReachabilityStore, - ghostdag_store: DbGhostdagStore, - header_store: DbHeadersStore, - /// orphan blocks, parent hash -> orphan block - missing_blocks: HashMap<Hash, HashSet<DagHeader>>, -} - -impl BlockDAG { - pub fn new(genesis_hash: Hash, k: KType, db: FlexiDagStorage) -> Self { - let ghostdag_store = db.ghost_dag_store.clone(); - let header_store = db.header_store.clone(); - let relations_store = db.relations_store.clone(); - let mut reachability_store = db.reachability_store; - inquirer::init(&mut reachability_store).unwrap(); - let reachability_service = - MTReachabilityService::new(Arc::new(RwLock::new(reachability_store.clone()))); - let ghostdag_manager = DbGhostdagManager::new( - genesis_hash, - k, - ghostdag_store.clone(), - relations_store.clone(), - header_store.clone(), - reachability_service, - ); - - let mut dag = Self { - genesis_hash, - ghostdag_manager, - relations_store, - reachability_store, - ghostdag_store, - header_store, - missing_blocks: HashMap::new(), - }; - dag - } - - pub fn clear_missing_block(&mut self) { - self.missing_blocks.clear(); - } - - pub fn init_with_genesis(&mut self, genesis: DagHeader) -> anyhow::Result<()> { - if self.relations_store.has(Hash::new(ORIGIN))? 
{ - return Err(anyhow!("Already init with genesis")); - }; - self.relations_store - .insert(Hash::new(ORIGIN), BlockHashes::new(vec![])) - .unwrap(); - let _ = self.addToDag(genesis); - Ok(()) - } - - pub fn addToDag(&mut self, header: DagHeader) -> anyhow::Result<GhostdagData> { - //TODO:check genesis - // Generate ghostdag data - let parents_hash = header.parents_hash(); - let ghostdag_data = if header.hash() != self.genesis_hash { - self.ghostdag_manager.ghostdag(parents_hash) - } else { - self.ghostdag_manager.genesis_ghostdag_data() - }; - // Store ghostdata - self.ghostdag_store - .insert(header.hash(), Arc::new(ghostdag_data.clone())) - .unwrap(); - - // Update reachability store - let mut reachability_store = self.reachability_store.clone(); - let mut merge_set = ghostdag_data - .unordered_mergeset_without_selected_parent() - .filter(|hash| self.reachability_store.has(*hash).unwrap()); - - inquirer::add_block( - &mut reachability_store, - header.hash(), - ghostdag_data.selected_parent, - &mut merge_set, - )?; - - // store relations - self.relations_store - .insert(header.hash(), BlockHashes::new(parents_hash.to_vec()))?; - // Store header store - let _ = self - .header_store - .insert(header.hash(), Arc::new(header.to_owned()), 0)?; - return Ok(ghostdag_data.clone()); - } - - fn is_in_dag(&self, _hash: Hash) -> anyhow::Result<bool> { - return Ok(true); - } - pub fn verify_header(&self, _header: &DagHeader) -> anyhow::Result<()> { - //TODO: implemented it - Ok(()) - } - - pub fn connect_block(&mut self, header: DagHeader) -> anyhow::Result<()> { - let _ = self.verify_header(&header)?; - let is_orphan_block = self.update_orphans(&header)?; - if is_orphan_block { - return Ok(()); - } - self.addToDag(header.clone()); - self.check_missing_block(header)?; - Ok(()) - } - - pub fn check_missing_block(&mut self, header: DagHeader) -> anyhow::Result<()> { - if let Some(orphans) = self.missing_blocks.remove(&header.hash()) { - for orphan in orphans.iter() { - let is_orphan = self.is_orphan(&orphan)?; - if !is_orphan { - self.addToDag(header.clone()); - } - } - } - Ok(()) - } - fn is_orphan(&self, header: &DagHeader) -> anyhow::Result<bool> { - for parent in header.parents_hash() { - if !self.is_in_dag(parent.to_owned())? { - return Ok(false); - } - } - return Ok(true); - } - pub fn get_ghostdag_data(&self, hash: Hash) -> anyhow::Result<Arc<GhostdagData>> { - let ghostdata = self.ghostdag_store.get_data(hash)?; - return Ok(ghostdata); - } - - fn update_orphans(&mut self, block_header: &DagHeader) -> anyhow::Result<bool> { - let mut is_orphan = false; - for parent in block_header.parents_hash() { - if self.is_in_dag(parent.to_owned())? 
{ - continue; - } - if !self - .missing_blocks - .entry(parent.to_owned()) - .or_insert_with(HashSet::new) - .insert(block_header.to_owned()) - { - return Err(anyhow::anyhow!("Block already processed as a orphan")); - } - is_orphan = true; - } - Ok(is_orphan) - } - - pub fn get_block_header(&self, hash: Hash) -> anyhow::Result<DagHeader> { - match self.header_store.get_header(hash) { - anyhow::Result::Ok(header) => anyhow::Result::Ok(header), - Err(error) => { - println!("failed to get header by hash: {}", error.to_string()); - bail!("failed to get header by hash: {}", error.to_string()); - } - } - } - - pub fn get_parents(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { - match self.relations_store.get_parents(hash) { - anyhow::Result::Ok(parents) => anyhow::Result::Ok((*parents).clone()), - Err(error) => { - println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } - - pub fn get_children(&self, hash: Hash) -> anyhow::Result<Vec<Hash>> { - match self.relations_store.get_children(hash) { - anyhow::Result::Ok(children) => anyhow::Result::Ok((*children).clone()), - Err(error) => { - println!("failed to get parents by hash: {}", error.to_string()); - bail!("failed to get parents by hash: {}", error.to_string()); - } - } - } - - // for testing - pub fn push_parent_children( - &mut self, - child: Hash, - parents: Arc<Vec<Hash>>, - ) -> Result<(), StoreError> { - self.relations_store.insert(child, parents) - } - - pub fn get_genesis_hash(&self) -> Hash { - self.genesis_hash - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig}; - use starcoin_types::block::BlockHeader; - use std::{env, fs}; - - #[test] - fn base_test() { - let genesis = DagHeader::new_genesis(BlockHeader::random()); - let genesis_hash = genesis.hash(); - let k = 16; - let db_path = env::temp_dir().join("smolstc"); - println!("db path:{}", db_path.to_string_lossy()); - if db_path - .as_path() - .try_exists() - .unwrap_or_else(|_| panic!("Failed to check {db_path:?}")) - { - fs::remove_dir_all(db_path.as_path()).expect("Failed to delete temporary directory"); - } - let config = FlexiDagStorageConfig::create_with_params(1, 0, 1024); - let db = FlexiDagStorage::create_from_path(db_path, config) - .expect("Failed to create flexidag storage"); - let mut dag = BlockDAG::new(genesis_hash, k, db); - dag.init_with_genesis(genesis); - let block = DagHeader::new(BlockHeader::random(), vec![genesis_hash]); - dag.addToDag(block); - } -} diff --git a/flexidag/src/consensusdb/access.rs b/flexidag/src/consensusdb/access.rs deleted file mode 100644 index e46e85acfe..0000000000 --- a/flexidag/src/consensusdb/access.rs +++ /dev/null @@ -1,199 +0,0 @@ -use super::{cache::DagCache, db::DBStorage, error::StoreError}; - -use super::prelude::DbWriter; -use super::schema::{KeyCodec, Schema, ValueCodec}; -use itertools::Itertools; -use rocksdb::{Direction, IteratorMode, ReadOptions}; -use starcoin_storage::storage::RawDBStorage; -use std::{ - collections::hash_map::RandomState, error::Error, hash::BuildHasher, marker::PhantomData, - sync::Arc, -}; - -/// A concurrent DB store access with typed caching. 
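
A note on the pattern deleted here (per the hunks above, the live copy of this DAG code now sits in the `consensus` crate): `CachedDbAccess` is a read-through cache in front of a RocksDB column family. `read` consults the in-memory `DagCache` first, falls back to `raw_get_pinned_cf` on a miss, decodes through the schema's `ValueCodec`, and remembers the decoded value; writes go through a `DbWriter` and refresh the cache entry. A minimal sketch of that read path, with `std` types standing in for `GCacheStorage` and RocksDB (all names below are illustrative, not this crate's API):

    use std::collections::HashMap;
    use std::hash::Hash;
    use std::sync::Mutex;

    // Stand-in for the RocksDB column family behind the real store.
    trait RawStore<K, V> {
        fn get(&self, key: &K) -> Option<V>;
    }

    // Read-through cache: serve hits from memory, fall back to the
    // backing store on a miss, and remember what was read.
    struct CachedAccess<K, V, S> {
        store: S,
        cache: Mutex<HashMap<K, V>>,
    }

    impl<K: Eq + Hash + Clone, V: Clone, S: RawStore<K, V>> CachedAccess<K, V, S> {
        fn new(store: S) -> Self {
            Self { store, cache: Mutex::new(HashMap::new()) }
        }

        fn read(&self, key: &K) -> Option<V> {
            if let Some(hit) = self.cache.lock().unwrap().get(key) {
                return Some(hit.clone()); // cache hit, no DB touch
            }
            let value = self.store.get(key)?; // miss: go to the DB
            self.cache.lock().unwrap().insert(key.clone(), value.clone());
            Some(value)
        }
    }

The real implementation returns `Result<_, StoreError>` and distinguishes `KeyNotFound` from decode failures, but the hit/miss/populate flow is the same.
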
-#[derive(Clone)] -pub struct CachedDbAccess<S: Schema, R = RandomState> { - db: Arc<DBStorage>, - - // Cache - cache: DagCache<S::Key, S::Value>, - - _phantom: PhantomData<R>, -} - -impl<S: Schema, R> CachedDbAccess<S, R> -where - R: BuildHasher + Default, -{ - pub fn new(db: Arc<DBStorage>, cache_size: u64) -> Self { - Self { - db, - cache: DagCache::new_with_capacity(cache_size), - _phantom: Default::default(), - } - } - - pub fn read_from_cache(&self, key: S::Key) -> Option<S::Value> { - self.cache.get(&key) - } - - pub fn has(&self, key: S::Key) -> Result<bool, StoreError> { - Ok(self.cache.contains_key(&key) - || self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - .is_some()) - } - - pub fn read(&self, key: S::Key) -> Result<S::Value, StoreError> { - if let Some(data) = self.cache.get(&key) { - Ok(data) - } else if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, key.encode_key().unwrap()) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - { - let data = S::Value::decode_value(slice.as_ref()) - .map_err(|o| StoreError::DecodeError(o.to_string()))?; - self.cache.insert(key, data.clone()); - Ok(data) - } else { - Err(StoreError::KeyNotFound("".to_string())) - } - } - - pub fn iterator( - &self, - ) -> Result<impl Iterator<Item = Result<(Box<[u8]>, S::Value), Box<dyn Error>>> + '_, StoreError> - { - let db_iterator = self - .db - .raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::Start, - ReadOptions::default(), - ) - .map_err(|e| StoreError::CFNotExist(e.to_string()))?; - - Ok(db_iterator.map(|iter_result| match iter_result { - Ok((key, data_bytes)) => match S::Value::decode_value(&data_bytes) { - Ok(data) => Ok((key, data)), - Err(e) => Err(e.into()), - }, - Err(e) => Err(e.into()), - })) - } - - pub fn write( - &self, - mut writer: impl DbWriter, - key: S::Key, - data: S::Value, - ) -> Result<(), StoreError> { - writer.put::<S>(&key, &data)?; - self.cache.insert(key, data); - Ok(()) - } - - pub fn write_many( - &self, - mut writer: impl DbWriter, - iter: &mut (impl Iterator<Item = (S::Key, S::Value)> + Clone), - ) -> Result<(), StoreError> { - for (key, data) in iter { - writer.put::<S>(&key, &data)?; - self.cache.insert(key, data); - } - Ok(()) - } - - /// Write directly from an iterator and do not cache any data. NOTE: this action also clears the cache - pub fn write_many_without_cache( - &self, - mut writer: impl DbWriter, - iter: &mut impl Iterator<Item = (S::Key, S::Value)>, - ) -> Result<(), StoreError> { - for (key, data) in iter { - writer.put::<S>(&key, &data)?; - } - // The cache must be cleared in order to avoid invalidated entries - self.cache.remove_all(); - Ok(()) - } - - pub fn delete(&self, mut writer: impl DbWriter, key: S::Key) -> Result<(), StoreError> { - self.cache.remove(&key); - writer.delete::<S>(&key)?; - Ok(()) - } - - pub fn delete_many( - &self, - mut writer: impl DbWriter, - key_iter: &mut (impl Iterator<Item = S::Key> + Clone), - ) -> Result<(), StoreError> { - let key_iter_clone = key_iter.clone(); - self.cache.remove_many(key_iter); - for key in key_iter_clone { - writer.delete::<S>(&key)?; - } - Ok(()) - } - - pub fn delete_all(&self, mut writer: impl DbWriter) -> Result<(), StoreError> { - self.cache.remove_all(); - let keys = self - .db - .raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::Start, - ReadOptions::default(), - ) - .map_err(|e| StoreError::CFNotExist(e.to_string()))? 
- .map(|iter_result| match iter_result { - Ok((key, _)) => Ok::<_, rocksdb::Error>(key), - Err(e) => Err(e), - }) - .collect_vec(); - for key in keys { - writer.delete::<S>(&S::Key::decode_key(&key?)?)?; - } - Ok(()) - } - - /// A dynamic iterator that can iterate through a specific prefix, and from a certain start point. - //TODO: loop and chain iterators for multi-prefix iterator. - pub fn seek_iterator( - &self, - seek_from: Option<S::Key>, // iter whole range if None - limit: usize, // amount to take. - skip_first: bool, // skips the first value, (useful in conjunction with the seek-key, as to not re-retrieve). - ) -> Result<impl Iterator<Item = Result<(Box<[u8]>, S::Value), Box<dyn Error>>> + '_, StoreError> - { - let read_opts = ReadOptions::default(); - let mut db_iterator = match seek_from { - Some(seek_key) => self.db.raw_iterator_cf_opt( - S::COLUMN_FAMILY, - IteratorMode::From(seek_key.encode_key()?.as_slice(), Direction::Forward), - read_opts, - ), - None => self - .db - .raw_iterator_cf_opt(S::COLUMN_FAMILY, IteratorMode::Start, read_opts), - } - .map_err(|e| StoreError::CFNotExist(e.to_string()))?; - - if skip_first { - db_iterator.next(); - } - - Ok(db_iterator.take(limit).map(move |item| match item { - Ok((key_bytes, value_bytes)) => match S::Value::decode_value(value_bytes.as_ref()) { - Ok(value) => Ok((key_bytes, value)), - Err(err) => Err(err.into()), - }, - Err(err) => Err(err.into()), - })) - } -} diff --git a/flexidag/src/consensusdb/cache.rs b/flexidag/src/consensusdb/cache.rs deleted file mode 100644 index e2d5de0c3c..0000000000 --- a/flexidag/src/consensusdb/cache.rs +++ /dev/null @@ -1,44 +0,0 @@ -use core::hash::Hash; -use starcoin_storage::cache_storage::GCacheStorage; -use std::sync::Arc; - -#[derive(Clone)] -pub struct DagCache<K: Hash + Eq + Default, V: Default> { - cache: Arc<GCacheStorage<K, V>>, -} - -impl<K, V> DagCache<K, V> -where - K: Hash + Eq + Default, - V: Default + Clone, -{ - pub(crate) fn new_with_capacity(size: u64) -> Self { - Self { - cache: Arc::new(GCacheStorage::new_with_capacity(size as usize, None)), - } - } - - pub(crate) fn get(&self, key: &K) -> Option<V> { - self.cache.get_inner(key) - } - - pub(crate) fn contains_key(&self, key: &K) -> bool { - self.get(key).is_some() - } - - pub(crate) fn insert(&self, key: K, data: V) { - self.cache.put_inner(key, data); - } - - pub(crate) fn remove(&self, key: &K) { - self.cache.remove_inner(key); - } - - pub(crate) fn remove_many(&self, key_iter: &mut impl Iterator<Item = K>) { - key_iter.for_each(|k| self.remove(&k)); - } - - pub(crate) fn remove_all(&self) { - self.cache.remove_all(); - } -} diff --git a/flexidag/src/consensusdb/consensus_ghostdag.rs b/flexidag/src/consensusdb/consensus_ghostdag.rs deleted file mode 100644 index a6746d9eb5..0000000000 --- a/flexidag/src/consensusdb/consensus_ghostdag.rs +++ /dev/null @@ -1,512 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - error::StoreError, - prelude::{CachedDbAccess, DirectDbWriter}, - writer::BatchDbWriter, -}; -use crate::define_schema; -use starcoin_types::blockhash::{ - BlockHashMap, BlockHashes, BlockLevel, BlueWorkType, HashKTypeMap, -}; - -use crate::dag::types::{ - ghostdata::{CompactGhostdagData, GhostdagData}, - ordering::SortableBlock, -}; -use itertools::{ - EitherOrBoth::{Both, Left, Right}, - Itertools, -}; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use std::{cell::RefCell, cmp, iter::once, sync::Arc}; - -pub trait GhostdagStoreReader { - fn get_blue_score(&self, 
hash: Hash) -> Result<u64, StoreError>; - fn get_blue_work(&self, hash: Hash) -> Result<BlueWorkType, StoreError>; - fn get_selected_parent(&self, hash: Hash) -> Result<Hash, StoreError>; - fn get_mergeset_blues(&self, hash: Hash) -> Result<BlockHashes, StoreError>; - fn get_mergeset_reds(&self, hash: Hash) -> Result<BlockHashes, StoreError>; - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result<HashKTypeMap, StoreError>; - - /// Returns full block data for the requested hash - fn get_data(&self, hash: Hash) -> Result<Arc<GhostdagData>, StoreError>; - - fn get_compact_data(&self, hash: Hash) -> Result<CompactGhostdagData, StoreError>; - - /// Check if the store contains data for the requested hash - fn has(&self, hash: Hash) -> Result<bool, StoreError>; -} - -pub trait GhostdagStore: GhostdagStoreReader { - /// Insert GHOSTDAG data for block `hash` into the store. Note that GHOSTDAG data - /// is added once and never modified, so no need for specific setters for each element. - /// Additionally, this means writes are semantically "append-only", which is why - /// we can keep the `insert` method non-mutable on self. See "Parallel Processing.md" for an overview. - fn insert(&self, hash: Hash, data: Arc<GhostdagData>) -> Result<(), StoreError>; -} - -pub struct GhostDagDataWrapper(GhostdagData); - -impl From<GhostdagData> for GhostDagDataWrapper { - fn from(value: GhostdagData) -> Self { - Self(value) - } -} - -impl GhostDagDataWrapper { - /// Returns an iterator to the mergeset in ascending blue work order (tie-breaking by hash) - pub fn ascending_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Result<SortableBlock, StoreError>> + '_ { - self.0 - .mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .cloned() - .map(|h| { - store - .get_blue_work(h) - .map(|blue| SortableBlock::new(h, blue)) - }) - .merge_join_by( - self.0 - .mergeset_reds - .iter() - .cloned() - .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), - |a, b| match (a, b) { - (Ok(a), Ok(b)) => a.cmp(b), - (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node - (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node - (Err(_), Err(_)) => cmp::Ordering::Equal, // remove both Err nodes - }, - ) - .map(|r| match r { - Left(b) | Right(b) => b, - Both(c, _) => Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), - }) - } - - /// Returns an iterator to the mergeset in descending blue work order (tie-breaking by hash) - pub fn descending_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Result<SortableBlock, StoreError>> + '_ { - self.0 - .mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .rev() // Reverse since blues and reds are stored with ascending blue work order - .cloned() - .map(|h| { - store - .get_blue_work(h) - .map(|blue| SortableBlock::new(h, blue)) - }) - .merge_join_by( - self.0 - .mergeset_reds - .iter() - .rev() // Reverse - .cloned() - .map(|h| store.get_blue_work(h).map(|red| SortableBlock::new(h, red))), - |a, b| match (b, a) { - (Ok(b), Ok(a)) => b.cmp(a), - (Err(_), Ok(_)) => cmp::Ordering::Less, // select left Err node - (Ok(_), Err(_)) => cmp::Ordering::Greater, // select right Err node - (Err(_), Err(_)) => cmp::Ordering::Equal, // select both Err nodes - }, // Reverse - ) - .map(|r| match r { - Left(b) | Right(b) => b, - Both(c, _) => 
Err(StoreError::DAGDupBlocksError(format!("{c:?}"))), - }) - } - - /// Returns an iterator to the mergeset in topological consensus order -- starting with the selected parent, - /// and adding the mergeset in increasing blue work order. Note that this is a topological order even though - /// the selected parent has highest blue work by def -- since the mergeset is in its anticone. - pub fn consensus_ordered_mergeset<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Result<Hash, StoreError>> + '_ { - once(Ok(self.0.selected_parent)).chain( - self.ascending_mergeset_without_selected_parent(store) - .map(|s| s.map(|s| s.hash)), - ) - } - - /// Returns an iterator to the mergeset in topological consensus order without the selected parent - pub fn consensus_ordered_mergeset_without_selected_parent<'a>( - &'a self, - store: &'a (impl GhostdagStoreReader + ?Sized), - ) -> impl Iterator<Item = Result<Hash, StoreError>> + '_ { - self.ascending_mergeset_without_selected_parent(store) - .map(|s| s.map(|s| s.hash)) - } -} - -pub(crate) const GHOST_DAG_STORE_CF: &str = "block-ghostdag-data"; -pub(crate) const COMPACT_GHOST_DAG_STORE_CF: &str = "compact-block-ghostdag-data"; - -define_schema!(GhostDag, Hash, Arc<GhostdagData>, GHOST_DAG_STORE_CF); -define_schema!( - CompactGhostDag, - Hash, - CompactGhostdagData, - COMPACT_GHOST_DAG_STORE_CF -); - -impl KeyCodec<GhostDag> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<GhostDag> for Arc<GhostdagData> { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -impl KeyCodec<CompactGhostDag> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<CompactGhostDag> for CompactGhostdagData { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `GhostdagStore` trait, with concurrency support. -#[derive(Clone)] -pub struct DbGhostdagStore { - db: Arc<DBStorage>, - level: BlockLevel, - access: CachedDbAccess<GhostDag>, - compact_access: CachedDbAccess<CompactGhostDag>, -} - -impl DbGhostdagStore { - pub fn new(db: Arc<DBStorage>, level: BlockLevel, cache_size: u64) -> Self { - Self { - db: Arc::clone(&db), - level, - access: CachedDbAccess::new(db.clone(), cache_size), - compact_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new(Arc::clone(&self.db), self.level, cache_size) - } - - pub fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - data: &Arc<GhostdagData>, - ) -> Result<(), StoreError> { - if self.access.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.access - .write(BatchDbWriter::new(batch), hash, data.clone())?; - self.compact_access.write( - BatchDbWriter::new(batch), - hash, - CompactGhostdagData { - blue_score: data.blue_score, - blue_work: data.blue_work, - selected_parent: data.selected_parent, - }, - )?; - Ok(()) - } -} - -impl GhostdagStoreReader for DbGhostdagStore { - fn get_blue_score(&self, hash: Hash) -> Result<u64, StoreError> { - Ok(self.access.read(hash)?.blue_score) - } - - fn get_blue_work(&self, hash: Hash) -> Result<BlueWorkType, StoreError> { - Ok(self.access.read(hash)?.blue_work) - } - - fn get_selected_parent(&self, hash: Hash) -> Result<Hash, StoreError> { - Ok(self.access.read(hash)?.selected_parent) - } - - fn get_mergeset_blues(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - Ok(Arc::clone(&self.access.read(hash)?.mergeset_blues)) - } - - fn get_mergeset_reds(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - Ok(Arc::clone(&self.access.read(hash)?.mergeset_reds)) - } - - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result<HashKTypeMap, StoreError> { - Ok(Arc::clone(&self.access.read(hash)?.blues_anticone_sizes)) - } - - fn get_data(&self, hash: Hash) -> Result<Arc<GhostdagData>, StoreError> { - self.access.read(hash) - } - - fn get_compact_data(&self, hash: Hash) -> Result<CompactGhostdagData, StoreError> { - self.compact_access.read(hash) - } - - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - self.access.has(hash) - } -} - -impl GhostdagStore for DbGhostdagStore { - fn insert(&self, hash: Hash, data: Arc<GhostdagData>) -> Result<(), StoreError> { - if self.access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.access - .write(DirectDbWriter::new(&self.db), hash, data.clone())?; - if self.compact_access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.compact_access.write( - DirectDbWriter::new(&self.db), - hash, - CompactGhostdagData { - blue_score: data.blue_score, - blue_work: data.blue_work, - selected_parent: data.selected_parent, - }, - )?; - Ok(()) - } -} - -/// An in-memory implementation of `GhostdagStore` trait to be used for tests. -/// Uses `RefCell` for interior mutability in order to workaround `insert` -/// being non-mutable. -pub struct MemoryGhostdagStore { - blue_score_map: RefCell<BlockHashMap<u64>>, - blue_work_map: RefCell<BlockHashMap<BlueWorkType>>, - selected_parent_map: RefCell<BlockHashMap<Hash>>, - mergeset_blues_map: RefCell<BlockHashMap<BlockHashes>>, - mergeset_reds_map: RefCell<BlockHashMap<BlockHashes>>, - blues_anticone_sizes_map: RefCell<BlockHashMap<HashKTypeMap>>, -} - -impl MemoryGhostdagStore { - pub fn new() -> Self { - Self { - blue_score_map: RefCell::new(BlockHashMap::new()), - blue_work_map: RefCell::new(BlockHashMap::new()), - selected_parent_map: RefCell::new(BlockHashMap::new()), - mergeset_blues_map: RefCell::new(BlockHashMap::new()), - mergeset_reds_map: RefCell::new(BlockHashMap::new()), - blues_anticone_sizes_map: RefCell::new(BlockHashMap::new()), - } - } -} - -impl Default for MemoryGhostdagStore { - fn default() -> Self { - Self::new() - } -} - -impl GhostdagStore for MemoryGhostdagStore { - fn insert(&self, hash: Hash, data: Arc<GhostdagData>) -> Result<(), StoreError> { - if self.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.blue_score_map - .borrow_mut() - .insert(hash, data.blue_score); - self.blue_work_map.borrow_mut().insert(hash, data.blue_work); - self.selected_parent_map - .borrow_mut() - .insert(hash, data.selected_parent); - self.mergeset_blues_map - .borrow_mut() - .insert(hash, data.mergeset_blues.clone()); - self.mergeset_reds_map - .borrow_mut() - .insert(hash, data.mergeset_reds.clone()); - self.blues_anticone_sizes_map - .borrow_mut() - .insert(hash, data.blues_anticone_sizes.clone()); - Ok(()) - } -} - -impl GhostdagStoreReader for MemoryGhostdagStore { - fn get_blue_score(&self, hash: Hash) -> Result<u64, StoreError> { - match self.blue_score_map.borrow().get(&hash) { - Some(blue_score) => Ok(*blue_score), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_blue_work(&self, hash: Hash) -> Result<BlueWorkType, StoreError> { - match self.blue_work_map.borrow().get(&hash) { - Some(blue_work) => Ok(*blue_work), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_selected_parent(&self, hash: Hash) -> Result<Hash, StoreError> { - match self.selected_parent_map.borrow().get(&hash) { - Some(selected_parent) => Ok(*selected_parent), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_mergeset_blues(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - match self.mergeset_blues_map.borrow().get(&hash) { - Some(mergeset_blues) => Ok(BlockHashes::clone(mergeset_blues)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_mergeset_reds(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - match self.mergeset_reds_map.borrow().get(&hash) { - Some(mergeset_reds) => Ok(BlockHashes::clone(mergeset_reds)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_blues_anticone_sizes(&self, hash: Hash) -> Result<HashKTypeMap, StoreError> { - match self.blues_anticone_sizes_map.borrow().get(&hash) { - Some(sizes) => Ok(HashKTypeMap::clone(sizes)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_data(&self, hash: Hash) -> Result<Arc<GhostdagData>, StoreError> { - if !self.has(hash)? 
{ - return Err(StoreError::KeyNotFound(hash.to_string())); - } - Ok(Arc::new(GhostdagData::new( - self.blue_score_map.borrow()[&hash], - self.blue_work_map.borrow()[&hash], - self.selected_parent_map.borrow()[&hash], - self.mergeset_blues_map.borrow()[&hash].clone(), - self.mergeset_reds_map.borrow()[&hash].clone(), - self.blues_anticone_sizes_map.borrow()[&hash].clone(), - ))) - } - - fn get_compact_data(&self, hash: Hash) -> Result<CompactGhostdagData, StoreError> { - Ok(self.get_data(hash)?.to_compact()) - } - - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - Ok(self.blue_score_map.borrow().contains_key(&hash)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use starcoin_types::blockhash::BlockHashSet; - use std::iter::once; - - #[test] - fn test_mergeset_iterators() { - let store = MemoryGhostdagStore::new(); - - let factory = |w: u64| { - Arc::new(GhostdagData { - blue_score: Default::default(), - blue_work: w.into(), - selected_parent: Default::default(), - mergeset_blues: Default::default(), - mergeset_reds: Default::default(), - blues_anticone_sizes: Default::default(), - }) - }; - - // Blues - store.insert(1.into(), factory(2)).unwrap(); - store.insert(2.into(), factory(7)).unwrap(); - store.insert(3.into(), factory(11)).unwrap(); - - // Reds - store.insert(4.into(), factory(4)).unwrap(); - store.insert(5.into(), factory(9)).unwrap(); - store.insert(6.into(), factory(11)).unwrap(); // Tie-breaking case - - let mut data = GhostdagData::new_with_selected_parent(1.into(), 5); - data.add_blue(2.into(), Default::default(), &Default::default()); - data.add_blue(3.into(), Default::default(), &Default::default()); - - data.add_red(4.into()); - data.add_red(5.into()); - data.add_red(6.into()); - - let wrapper: GhostDagDataWrapper = data.clone().into(); - - let mut expected: Vec<Hash> = vec![4.into(), 2.into(), 5.into(), 3.into(), 6.into()]; - assert_eq!( - expected, - wrapper - .ascending_mergeset_without_selected_parent(&store) - .filter_map(|b| b.map(|b| b.hash).ok()) - .collect::<Vec<Hash>>() - ); - - itertools::assert_equal( - once(1.into()).chain(expected.iter().cloned()), - wrapper - .consensus_ordered_mergeset(&store) - .filter_map(|b| b.ok()), - ); - - expected.reverse(); - assert_eq!( - expected, - wrapper - .descending_mergeset_without_selected_parent(&store) - .filter_map(|b| b.map(|b| b.hash).ok()) - .collect::<Vec<Hash>>() - ); - - // Use sets since the below functions have no order guarantee - let expected = BlockHashSet::from_iter([4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); - assert_eq!( - expected, - data.unordered_mergeset_without_selected_parent() - .collect::<BlockHashSet>() - ); - - let expected = - BlockHashSet::from_iter([1.into(), 4.into(), 2.into(), 5.into(), 3.into(), 6.into()]); - assert_eq!( - expected, - data.unordered_mergeset().collect::<BlockHashSet>() - ); - } -} diff --git a/flexidag/src/consensusdb/consensus_header.rs b/flexidag/src/consensusdb/consensus_header.rs deleted file mode 100644 index 1e8adc2ed4..0000000000 --- a/flexidag/src/consensusdb/consensus_header.rs +++ /dev/null @@ -1,216 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - error::{StoreError, StoreResult}, - prelude::CachedDbAccess, - writer::{BatchDbWriter, DirectDbWriter}, -}; -use crate::define_schema; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::{ - blockhash::BlockLevel, - consensus_header::{CompactHeaderData, ConsensusHeader, DagHeader, HeaderWithBlockLevel}, - U256, -}; -use 
std::sync::Arc; - -pub trait HeaderStoreReader { - fn get_daa_score(&self, hash: Hash) -> Result<u64, StoreError>; - fn get_blue_score(&self, hash: Hash) -> Result<u64, StoreError>; - fn get_timestamp(&self, hash: Hash) -> Result<u64, StoreError>; - fn get_difficulty(&self, hash: Hash) -> Result<U256, StoreError>; - fn get_header(&self, hash: Hash) -> Result<Arc<DagHeader>, StoreError>; - fn get_header_with_block_level(&self, hash: Hash) -> Result<HeaderWithBlockLevel, StoreError>; - fn get_compact_header_data(&self, hash: Hash) -> Result<CompactHeaderData, StoreError>; -} - -pub trait HeaderStore: HeaderStoreReader { - // This is append only - fn insert( - &self, - hash: Hash, - header: Arc<DagHeader>, - block_level: BlockLevel, - ) -> Result<(), StoreError>; -} - -pub(crate) const HEADERS_STORE_CF: &str = "headers-store"; -pub(crate) const COMPACT_HEADER_DATA_STORE_CF: &str = "compact-header-data"; - -define_schema!(BlockHeader, Hash, HeaderWithBlockLevel, HEADERS_STORE_CF); -define_schema!( - CompactBlockHeader, - Hash, - CompactHeaderData, - COMPACT_HEADER_DATA_STORE_CF -); - -impl KeyCodec<BlockHeader> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<BlockHeader> for HeaderWithBlockLevel { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec<CompactBlockHeader> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<CompactBlockHeader> for CompactHeaderData { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `HeaderStore` trait, with concurrency support. -#[derive(Clone)] -pub struct DbHeadersStore { - db: Arc<DBStorage>, - headers_access: CachedDbAccess<BlockHeader>, - compact_headers_access: CachedDbAccess<CompactBlockHeader>, -} - -impl DbHeadersStore { - pub fn new(db: Arc<DBStorage>, cache_size: u64) -> Self { - Self { - db: Arc::clone(&db), - headers_access: CachedDbAccess::new(db.clone(), cache_size), - compact_headers_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new(Arc::clone(&self.db), cache_size) - } - - pub fn has(&self, hash: Hash) -> StoreResult<bool> { - self.headers_access.has(hash) - } - - pub fn get_header(&self, hash: Hash) -> Result<DagHeader, StoreError> { - let result = self.headers_access.read(hash)?; - Ok((*result.header).clone()) - } - - pub fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - header: Arc<DagHeader>, - block_level: BlockLevel, - ) -> Result<(), StoreError> { - if self.headers_access.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.headers_access.write( - BatchDbWriter::new(batch), - hash, - HeaderWithBlockLevel { - header: header.clone(), - block_level, - }, - )?; - self.compact_headers_access.write( - BatchDbWriter::new(batch), - hash, - CompactHeaderData { - timestamp: header.timestamp(), - difficulty: header.difficulty(), - }, - )?; - Ok(()) - } -} - -impl HeaderStoreReader for DbHeadersStore { - fn get_daa_score(&self, _hash: Hash) -> Result<u64, StoreError> { - unimplemented!() - } - - fn get_blue_score(&self, _hash: Hash) -> Result<u64, StoreError> { - unimplemented!() - } - - fn get_timestamp(&self, hash: Hash) -> Result<u64, StoreError> { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(header_with_block_level.header.timestamp()); - } - Ok(self.compact_headers_access.read(hash)?.timestamp) - } - - fn get_difficulty(&self, hash: Hash) -> Result<U256, StoreError> { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(header_with_block_level.header.difficulty()); - } - Ok(self.compact_headers_access.read(hash)?.difficulty) - } - - fn get_header(&self, hash: Hash) -> Result<Arc<DagHeader>, StoreError> { - Ok(self.headers_access.read(hash)?.header) - } - - fn get_header_with_block_level(&self, hash: Hash) -> Result<HeaderWithBlockLevel, StoreError> { - self.headers_access.read(hash) - } - - fn get_compact_header_data(&self, hash: Hash) -> Result<CompactHeaderData, StoreError> { - if let Some(header_with_block_level) = self.headers_access.read_from_cache(hash) { - return Ok(CompactHeaderData { - timestamp: header_with_block_level.header.timestamp(), - difficulty: header_with_block_level.header.difficulty(), - }); - } - self.compact_headers_access.read(hash) - } -} - -impl HeaderStore for DbHeadersStore { - fn insert( - &self, - hash: Hash, - header: Arc<DagHeader>, - block_level: u8, - ) -> Result<(), StoreError> { - if self.headers_access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - self.compact_headers_access.write( - DirectDbWriter::new(&self.db), - hash, - CompactHeaderData { - timestamp: header.timestamp(), - difficulty: header.difficulty(), - }, - )?; - self.headers_access.write( - DirectDbWriter::new(&self.db), - hash, - HeaderWithBlockLevel { - header, - block_level, - }, - )?; - Ok(()) - } -} diff --git a/flexidag/src/consensusdb/consensus_reachability.rs b/flexidag/src/consensusdb/consensus_reachability.rs deleted file mode 100644 index 308ffb88a8..0000000000 --- a/flexidag/src/consensusdb/consensus_reachability.rs +++ /dev/null @@ -1,540 +0,0 @@ -use super::{ - db::DBStorage, - prelude::{BatchDbWriter, CachedDbAccess, CachedDbItem, DirectDbWriter, StoreError}, -}; -use starcoin_crypto::HashValue as Hash; -use starcoin_storage::storage::RawDBStorage; - -use crate::{ - dag::types::{interval::Interval, reachability::ReachabilityData}, - define_schema, - schema::{KeyCodec, ValueCodec}, -}; -use starcoin_types::blockhash::{self, BlockHashMap, BlockHashes}; - -use parking_lot::{RwLockUpgradableReadGuard, RwLockWriteGuard}; -use rocksdb::WriteBatch; -use std::{collections::hash_map::Entry::Vacant, sync::Arc}; - -/// Reader API for `ReachabilityStore`. 
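
Context for the reader trait that follows: each block's `ReachabilityData` carries an `Interval` (from the deleted `dag/types/interval.rs`) alongside `parent`, `children`, and `future_covering_set`. The intervals are what make ancestry queries cheap: each block's interval is allocated as a sub-range of its parent's, so "x is a chain ancestor of y" reduces to an interval-containment check. A toy version of that test (simplified types, not the crate's):

    // Each block owns a slice of its parent's interval, handed out by the
    // (deleted) reindex logic; containment then encodes tree ancestry.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    struct Interval {
        start: u64,
        end: u64,
    }

    impl Interval {
        fn contains(&self, other: &Interval) -> bool {
            self.start <= other.start && other.end <= self.end
        }
    }

    fn is_chain_ancestor_of(this: &Interval, queried: &Interval) -> bool {
        this.contains(queried) // two integer comparisons, no graph walk
    }

    fn main() {
        let root = Interval { start: 1, end: 1_000 };  // capacity set at init()
        let child = Interval { start: 10, end: 500 };  // sub-slice of root
        let uncle = Interval { start: 501, end: 900 }; // disjoint sibling slice
        assert!(is_chain_ancestor_of(&root, &child));
        assert!(!is_chain_ancestor_of(&child, &uncle));
    }

Running out of slack in an interval is what triggers the reindexing handled by the deleted `reachability/reindex.rs`, and is why the write API below is guarded rather than append-only.
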
-pub trait ReachabilityStoreReader { - fn has(&self, hash: Hash) -> Result<bool, StoreError>; - fn get_interval(&self, hash: Hash) -> Result<Interval, StoreError>; - fn get_parent(&self, hash: Hash) -> Result<Hash, StoreError>; - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError>; - fn get_future_covering_set(&self, hash: Hash) -> Result<BlockHashes, StoreError>; -} - -/// Write API for `ReachabilityStore`. All write functions are deliberately `mut` -/// since reachability writes are not append-only and thus need to be guarded. -pub trait ReachabilityStore: ReachabilityStoreReader { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError>; - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError>; - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError>; - fn append_child(&mut self, hash: Hash, child: Hash) -> Result<u64, StoreError>; - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError>; - fn get_height(&self, hash: Hash) -> Result<u64, StoreError>; - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError>; - fn get_reindex_root(&self) -> Result<Hash, StoreError>; -} - -const REINDEX_ROOT_KEY: &str = "reachability-reindex-root"; -pub(crate) const REACHABILITY_DATA_CF: &str = "reachability-data"; -// TODO: explore perf to see if using fixed-length constants for store prefixes is preferable - -define_schema!( - Reachability, - Hash, - Arc<ReachabilityData>, - REACHABILITY_DATA_CF -); -define_schema!(ReachabilityCache, Vec<u8>, Hash, REACHABILITY_DATA_CF); - -impl KeyCodec<Reachability> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<Reachability> for Arc<ReachabilityData> { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec<ReachabilityCache> for Vec<u8> { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Ok(data.to_vec()) - } -} -impl ValueCodec<ReachabilityCache> for Hash { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `ReachabilityStore` trait, with concurrent readers support. 
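That concurrent-reader support hinges on `Arc::make_mut`: the implementation below caches `Arc<ReachabilityData>` values and mutates them copy-on-write, cloning the data only if some reader still holds the old `Arc`. A self-contained illustration of the idiom used by `append_child` and friends (standard library only; `Node` is a hypothetical stand-in for `ReachabilityData`):

use std::sync::Arc;

// Hypothetical stand-in for ReachabilityData.
#[derive(Clone, Debug)]
struct Node {
    children: Arc<Vec<u64>>,
    height: u64,
}

// Mirrors the shape of DbReachabilityStore::append_child.
fn append_child(data: &mut Arc<Node>, child: u64) -> u64 {
    let height = data.height;
    // Copy-on-write: clones the Node only if another Arc still points at it.
    let node = Arc::make_mut(data);
    Arc::make_mut(&mut node.children).push(child);
    height
}

fn main() {
    let mut record = Arc::new(Node { children: Arc::new(Vec::new()), height: 5 });
    let reader_snapshot = record.clone(); // a concurrent reader's view
    assert_eq!(append_child(&mut record, 31), 5);
    assert_eq!(record.children.len(), 1);
    assert_eq!(reader_snapshot.children.len(), 0); // the snapshot is untouched
}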
-#[derive(Clone)] -pub struct DbReachabilityStore { - db: Arc<DBStorage>, - access: CachedDbAccess<Reachability>, - reindex_root: CachedDbItem<ReachabilityCache>, -} - -impl DbReachabilityStore { - pub fn new(db: Arc<DBStorage>, cache_size: u64) -> Self { - Self::new_with_prefix_end(db, cache_size) - } - - pub fn new_with_alternative_prefix_end(db: Arc<DBStorage>, cache_size: u64) -> Self { - Self::new_with_prefix_end(db, cache_size) - } - - fn new_with_prefix_end(db: Arc<DBStorage>, cache_size: u64) -> Self { - Self { - db: Arc::clone(&db), - access: CachedDbAccess::new(Arc::clone(&db), cache_size), - reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY.as_bytes().to_vec()), - } - } - - pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new_with_prefix_end(Arc::clone(&self.db), cache_size) - } -} - -impl ReachabilityStore for DbReachabilityStore { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - debug_assert!(!self.access.has(origin)?); - - let data = Arc::new(ReachabilityData::new( - Hash::new(blockhash::NONE), - capacity, - 0, - )); - let mut batch = WriteBatch::default(); - self.access - .write(BatchDbWriter::new(&mut batch), origin, data)?; - self.reindex_root - .write(BatchDbWriter::new(&mut batch), &origin)?; - self.db - .raw_write_batch(batch) - .map_err(|e| StoreError::DBIoError(e.to_string()))?; - - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if self.access.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - let data = Arc::new(ReachabilityData::new(parent, interval, height)); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - let mut data = self.access.read(hash)?; - Arc::make_mut(&mut data).interval = interval; - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result<u64, StoreError> { - let mut data = self.access.read(hash)?; - let height = data.height; - let mut_data = Arc::make_mut(&mut data); - Arc::make_mut(&mut mut_data.children).push(child); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - let mut data = self.access.read(hash)?; - let mut_data = Arc::make_mut(&mut data); - Arc::make_mut(&mut mut_data.future_covering_set).insert(insertion_index, fci); - self.access - .write(DirectDbWriter::new(&self.db), hash, data)?; - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result<u64, StoreError> { - Ok(self.access.read(hash)?.height) - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.reindex_root - .write(DirectDbWriter::new(&self.db), &root) - } - - fn get_reindex_root(&self) -> Result<Hash, StoreError> { - self.reindex_root.read() - } -} - -impl ReachabilityStoreReader for DbReachabilityStore { - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - self.access.has(hash) - } - - fn get_interval(&self, hash: Hash) -> Result<Interval, StoreError> { - Ok(self.access.read(hash)?.interval) - } - - fn get_parent(&self, hash: Hash) -> Result<Hash, StoreError> { - Ok(self.access.read(hash)?.parent) - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - 
Ok(Arc::clone(&self.access.read(hash)?.children)) - } - - fn get_future_covering_set(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - Ok(Arc::clone(&self.access.read(hash)?.future_covering_set)) - } -} - -pub struct StagingReachabilityStore<'a> { - store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>, - staging_writes: BlockHashMap<ReachabilityData>, - staging_reindex_root: Option<Hash>, -} - -impl<'a> StagingReachabilityStore<'a> { - pub fn new(store_read: RwLockUpgradableReadGuard<'a, DbReachabilityStore>) -> Self { - Self { - store_read, - staging_writes: BlockHashMap::new(), - staging_reindex_root: None, - } - } - - pub fn commit( - self, - batch: &mut WriteBatch, - ) -> Result<RwLockWriteGuard<'a, DbReachabilityStore>, StoreError> { - let mut store_write = RwLockUpgradableReadGuard::upgrade(self.store_read); - for (k, v) in self.staging_writes { - let data = Arc::new(v); - store_write - .access - .write(BatchDbWriter::new(batch), k, data)? - } - if let Some(root) = self.staging_reindex_root { - store_write - .reindex_root - .write(BatchDbWriter::new(batch), &root)?; - } - Ok(store_write) - } -} - -impl ReachabilityStore for StagingReachabilityStore<'_> { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; - self.set_reindex_root(origin)?; - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if self.store_read.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - if let Vacant(e) = self.staging_writes.entry(hash) { - e.insert(ReachabilityData::new(parent, interval, height)); - Ok(()) - } else { - Err(StoreError::KeyAlreadyExists(hash.to_string())) - } - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - if let Some(data) = self.staging_writes.get_mut(&hash) { - data.interval = interval; - return Ok(()); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - data.interval = interval; - self.staging_writes.insert(hash, data); - - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result<u64, StoreError> { - if let Some(data) = self.staging_writes.get_mut(&hash) { - Arc::make_mut(&mut data.children).push(child); - return Ok(data.height); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - let height = data.height; - Arc::make_mut(&mut data.children).push(child); - self.staging_writes.insert(hash, data); - - Ok(height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - if let Some(data) = self.staging_writes.get_mut(&hash) { - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - return Ok(()); - } - - let mut data = (*self.store_read.access.read(hash)?).clone(); - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - self.staging_writes.insert(hash, data); - - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result<u64, StoreError> { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.height) - } else { - Ok(self.store_read.access.read(hash)?.height) - } - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.staging_reindex_root = Some(root); - Ok(()) - } - - fn get_reindex_root(&self) -> Result<Hash, StoreError> { - if let Some(root) = self.staging_reindex_root { - Ok(root) - } else { 
- Ok(self.store_read.get_reindex_root()?) - } - } -} - -impl ReachabilityStoreReader for StagingReachabilityStore<'_> { - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - Ok(self.staging_writes.contains_key(&hash) || self.store_read.access.has(hash)?) - } - - fn get_interval(&self, hash: Hash) -> Result<Interval, StoreError> { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.interval) - } else { - Ok(self.store_read.access.read(hash)?.interval) - } - } - - fn get_parent(&self, hash: Hash) -> Result<Hash, StoreError> { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(data.parent) - } else { - Ok(self.store_read.access.read(hash)?.parent) - } - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(BlockHashes::clone(&data.children)) - } else { - Ok(BlockHashes::clone( - &self.store_read.access.read(hash)?.children, - )) - } - } - - fn get_future_covering_set(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - if let Some(data) = self.staging_writes.get(&hash) { - Ok(BlockHashes::clone(&data.future_covering_set)) - } else { - Ok(BlockHashes::clone( - &self.store_read.access.read(hash)?.future_covering_set, - )) - } - } -} - -pub struct MemoryReachabilityStore { - map: BlockHashMap<ReachabilityData>, - reindex_root: Option<Hash>, -} - -impl Default for MemoryReachabilityStore { - fn default() -> Self { - Self::new() - } -} - -impl MemoryReachabilityStore { - pub fn new() -> Self { - Self { - map: BlockHashMap::new(), - reindex_root: None, - } - } - - fn get_data_mut(&mut self, hash: Hash) -> Result<&mut ReachabilityData, StoreError> { - match self.map.get_mut(&hash) { - Some(data) => Ok(data), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_data(&self, hash: Hash) -> Result<&ReachabilityData, StoreError> { - match self.map.get(&hash) { - Some(data) => Ok(data), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } -} - -impl ReachabilityStore for MemoryReachabilityStore { - fn init(&mut self, origin: Hash, capacity: Interval) -> Result<(), StoreError> { - self.insert(origin, Hash::new(blockhash::NONE), capacity, 0)?; - self.set_reindex_root(origin)?; - Ok(()) - } - - fn insert( - &mut self, - hash: Hash, - parent: Hash, - interval: Interval, - height: u64, - ) -> Result<(), StoreError> { - if let Vacant(e) = self.map.entry(hash) { - e.insert(ReachabilityData::new(parent, interval, height)); - Ok(()) - } else { - Err(StoreError::KeyAlreadyExists(hash.to_string())) - } - } - - fn set_interval(&mut self, hash: Hash, interval: Interval) -> Result<(), StoreError> { - let data = self.get_data_mut(hash)?; - data.interval = interval; - Ok(()) - } - - fn append_child(&mut self, hash: Hash, child: Hash) -> Result<u64, StoreError> { - let data = self.get_data_mut(hash)?; - Arc::make_mut(&mut data.children).push(child); - Ok(data.height) - } - - fn insert_future_covering_item( - &mut self, - hash: Hash, - fci: Hash, - insertion_index: usize, - ) -> Result<(), StoreError> { - let data = self.get_data_mut(hash)?; - Arc::make_mut(&mut data.future_covering_set).insert(insertion_index, fci); - Ok(()) - } - - fn get_height(&self, hash: Hash) -> Result<u64, StoreError> { - Ok(self.get_data(hash)?.height) - } - - fn set_reindex_root(&mut self, root: Hash) -> Result<(), StoreError> { - self.reindex_root = Some(root); - Ok(()) - } - - fn get_reindex_root(&self) -> Result<Hash, StoreError> { - match self.reindex_root { - Some(root) => Ok(root), - None 
=> Err(StoreError::KeyNotFound(REINDEX_ROOT_KEY.to_string())), - } - } -} - -impl ReachabilityStoreReader for MemoryReachabilityStore { - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - Ok(self.map.contains_key(&hash)) - } - - fn get_interval(&self, hash: Hash) -> Result<Interval, StoreError> { - Ok(self.get_data(hash)?.interval) - } - - fn get_parent(&self, hash: Hash) -> Result<Hash, StoreError> { - Ok(self.get_data(hash)?.parent) - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - Ok(Arc::clone(&self.get_data(hash)?.children)) - } - - fn get_future_covering_set(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - Ok(Arc::clone(&self.get_data(hash)?.future_covering_set)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_store_basics() { - let mut store: Box<dyn ReachabilityStore> = Box::new(MemoryReachabilityStore::new()); - let (hash, parent) = (7.into(), 15.into()); - let interval = Interval::maximal(); - store.insert(hash, parent, interval, 5).unwrap(); - let height = store.append_child(hash, 31.into()).unwrap(); - assert_eq!(height, 5); - let children = store.get_children(hash).unwrap(); - println!("{children:?}"); - store.get_interval(7.into()).unwrap(); - println!("{children:?}"); - } -} diff --git a/flexidag/src/consensusdb/consensus_relations.rs b/flexidag/src/consensusdb/consensus_relations.rs deleted file mode 100644 index a34c1c049c..0000000000 --- a/flexidag/src/consensusdb/consensus_relations.rs +++ /dev/null @@ -1,316 +0,0 @@ -use super::schema::{KeyCodec, ValueCodec}; -use super::{ - db::DBStorage, - prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter, StoreError}, -}; -use crate::define_schema; -use rocksdb::WriteBatch; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlockLevel}; -use std::{collections::hash_map::Entry::Vacant, sync::Arc}; - -/// Reader API for `RelationsStore`. -pub trait RelationsStoreReader { - fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError>; - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError>; - fn has(&self, hash: Hash) -> Result<bool, StoreError>; -} - -/// Write API for `RelationsStore`. The insert function is deliberately `mut` -/// since it modifies the children arrays for previously added parents which is -/// non-append-only and thus needs to be guarded. 
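As that doc comment says, inserting one block also mutates the children arrays of all of its parents, so the store maintains a bidirectional index: `parents` is written once per block, while `children` is amended retroactively. A minimal in-memory model of the invariant (u64 hashes and String errors are simplified stand-ins; the sample DAG mirrors the one in this file's own tests):

use std::collections::HashMap;

#[derive(Default)]
struct Relations {
    parents: HashMap<u64, Vec<u64>>,
    children: HashMap<u64, Vec<u64>>,
}

impl Relations {
    fn insert(&mut self, hash: u64, parents: Vec<u64>) -> Result<(), String> {
        if self.parents.contains_key(&hash) {
            return Err(format!("key {hash} already exists in store"));
        }
        // Retroactively extend each parent's children list ...
        for &parent in &parents {
            self.children.entry(parent).or_default().push(hash);
        }
        // ... and record that the new block has no children yet.
        self.children.entry(hash).or_default();
        self.parents.insert(hash, parents);
        Ok(())
    }
}

fn main() {
    // Same DAG as in test_relations_store below.
    let mut store = Relations::default();
    let blocks = [(1, vec![]), (2, vec![1]), (3, vec![1]), (4, vec![2, 3]), (5, vec![1, 4])];
    for (block, parents) in blocks {
        store.insert(block, parents).unwrap();
    }
    assert_eq!(store.children[&1], vec![2, 3, 5]);
    assert_eq!(store.parents[&4], vec![2, 3]);
}

Materializing both directions is what makes child lookups a single read; the deleted store pays the same price of extra writes per insert for that.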
-pub trait RelationsStore: RelationsStoreReader { - /// Inserts `parents` into a new store entry for `hash`, and for each `parent ∈ parents` adds `hash` to `parent.children` - fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError>; -} - -pub(crate) const PARENTS_CF: &str = "block-parents"; -pub(crate) const CHILDREN_CF: &str = "block-children"; - -define_schema!(RelationParent, Hash, Arc<Vec<Hash>>, PARENTS_CF); -define_schema!(RelationChildren, Hash, Arc<Vec<Hash>>, CHILDREN_CF); - -impl KeyCodec<RelationParent> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl ValueCodec<RelationParent> for Arc<Vec<Hash>> { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} -impl KeyCodec<RelationChildren> for Hash { - fn encode_key(&self) -> Result<Vec<u8>, StoreError> { - Ok(self.to_vec()) - } - - fn decode_key(data: &[u8]) -> Result<Self, StoreError> { - Hash::from_slice(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -impl ValueCodec<RelationChildren> for Arc<Vec<Hash>> { - fn encode_value(&self) -> Result<Vec<u8>, StoreError> { - bcs_ext::to_bytes(self).map_err(|e| StoreError::EncodeError(e.to_string())) - } - - fn decode_value(data: &[u8]) -> Result<Self, StoreError> { - bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string())) - } -} - -/// A DB + cache implementation of `RelationsStore` trait, with concurrent readers support. -#[derive(Clone)] -pub struct DbRelationsStore { - db: Arc<DBStorage>, - level: BlockLevel, - parents_access: CachedDbAccess<RelationParent>, - children_access: CachedDbAccess<RelationChildren>, -} - -impl DbRelationsStore { - pub fn new(db: Arc<DBStorage>, level: BlockLevel, cache_size: u64) -> Self { - Self { - db: Arc::clone(&db), - level, - parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size), - children_access: CachedDbAccess::new(db, cache_size), - } - } - - pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new(Arc::clone(&self.db), self.level, cache_size) - } - - pub fn insert_batch( - &mut self, - batch: &mut WriteBatch, - hash: Hash, - parents: BlockHashes, - ) -> Result<(), StoreError> { - if self.has(hash)? 
{ - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - - // Insert a new entry for `hash` - self.parents_access - .write(BatchDbWriter::new(batch), hash, parents.clone())?; - - // The new hash has no children yet - self.children_access.write( - BatchDbWriter::new(batch), - hash, - BlockHashes::new(Vec::new()), - )?; - - // Update `children` for each parent - for parent in parents.iter().cloned() { - let mut children = (*self.get_children(parent)?).clone(); - children.push(hash); - self.children_access.write( - BatchDbWriter::new(batch), - parent, - BlockHashes::new(children), - )?; - } - - Ok(()) - } -} - -impl RelationsStoreReader for DbRelationsStore { - fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.parents_access.read(hash) - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.children_access.read(hash) - } - - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - if self.parents_access.has(hash)? { - debug_assert!(self.children_access.has(hash)?); - Ok(true) - } else { - Ok(false) - } - } -} - -impl RelationsStore for DbRelationsStore { - /// See `insert_batch` as well - /// TODO: use one function with DbWriter for both this function and insert_batch - fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { - if self.has(hash)? { - return Err(StoreError::KeyAlreadyExists(hash.to_string())); - } - - // Insert a new entry for `hash` - self.parents_access - .write(DirectDbWriter::new(&self.db), hash, parents.clone())?; - - // The new hash has no children yet - self.children_access.write( - DirectDbWriter::new(&self.db), - hash, - BlockHashes::new(Vec::new()), - )?; - - // Update `children` for each parent - for parent in parents.iter().cloned() { - let mut children = (*self.get_children(parent)?).clone(); - children.push(hash); - self.children_access.write( - DirectDbWriter::new(&self.db), - parent, - BlockHashes::new(children), - )?; - } - - Ok(()) - } -} - -pub struct MemoryRelationsStore { - parents_map: BlockHashMap<BlockHashes>, - children_map: BlockHashMap<BlockHashes>, -} - -impl MemoryRelationsStore { - pub fn new() -> Self { - Self { - parents_map: BlockHashMap::new(), - children_map: BlockHashMap::new(), - } - } -} - -impl Default for MemoryRelationsStore { - fn default() -> Self { - Self::new() - } -} - -impl RelationsStoreReader for MemoryRelationsStore { - fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - match self.parents_map.get(&hash) { - Some(parents) => Ok(BlockHashes::clone(parents)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - match self.children_map.get(&hash) { - Some(children) => Ok(BlockHashes::clone(children)), - None => Err(StoreError::KeyNotFound(hash.to_string())), - } - } - - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - Ok(self.parents_map.contains_key(&hash)) - } -} - -impl RelationsStore for MemoryRelationsStore { - fn insert(&mut self, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { - if let Vacant(e) = self.parents_map.entry(hash) { - // Update the new entry for `hash` - e.insert(BlockHashes::clone(&parents)); - - // Update `children` for each parent - for parent in parents.iter().cloned() { - let mut children = (*self.get_children(parent)?).clone(); - children.push(hash); - self.children_map.insert(parent, BlockHashes::new(children)); - } - - // The new hash has no children yet - 
self.children_map.insert(hash, BlockHashes::new(Vec::new())); - Ok(()) - } else { - Err(StoreError::KeyAlreadyExists(hash.to_string())) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::{ - db::RelationsStoreConfig, - prelude::{FlexiDagStorage, FlexiDagStorageConfig}, - }; - - #[test] - fn test_memory_relations_store() { - test_relations_store(MemoryRelationsStore::new()); - } - - #[test] - fn test_db_relations_store() { - let db_tempdir = tempfile::tempdir().unwrap(); - let rs_conf = RelationsStoreConfig { - block_level: 0, - cache_size: 2, - }; - let config = FlexiDagStorageConfig::new() - .update_parallelism(1) - .update_relations_conf(rs_conf); - - let db = FlexiDagStorage::create_from_path(db_tempdir.path(), config) - .expect("failed to create flexidag storage"); - test_relations_store(db.relations_store); - } - - fn test_relations_store<T: RelationsStore>(mut store: T) { - let parents = [ - (1, vec![]), - (2, vec![1]), - (3, vec![1]), - (4, vec![2, 3]), - (5, vec![1, 4]), - ]; - for (i, vec) in parents.iter().cloned() { - store - .insert( - i.into(), - BlockHashes::new(vec.iter().copied().map(Hash::from).collect()), - ) - .unwrap(); - } - - let expected_children = [ - (1, vec![2, 3, 5]), - (2, vec![4]), - (3, vec![4]), - (4, vec![5]), - (5, vec![]), - ]; - for (i, vec) in expected_children { - assert!(store - .get_children(i.into()) - .unwrap() - .iter() - .copied() - .eq(vec.iter().copied().map(Hash::from))); - } - - for (i, vec) in parents { - assert!(store - .get_parents(i.into()) - .unwrap() - .iter() - .copied() - .eq(vec.iter().copied().map(Hash::from))); - } - } -} diff --git a/flexidag/src/consensusdb/db.rs b/flexidag/src/consensusdb/db.rs deleted file mode 100644 index 331df80277..0000000000 --- a/flexidag/src/consensusdb/db.rs +++ /dev/null @@ -1,149 +0,0 @@ -use super::{ - error::StoreError, - schemadb::{ - DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF, - COMPACT_GHOST_DAG_STORE_CF, COMPACT_HEADER_DATA_STORE_CF, GHOST_DAG_STORE_CF, - HEADERS_STORE_CF, PARENTS_CF, REACHABILITY_DATA_CF, - }, -}; -use starcoin_config::RocksdbConfig; -pub(crate) use starcoin_storage::db_storage::DBStorage; -use std::{path::Path, sync::Arc}; - -#[derive(Clone)] -pub struct FlexiDagStorage { - pub ghost_dag_store: DbGhostdagStore, - pub header_store: DbHeadersStore, - pub reachability_store: DbReachabilityStore, - pub relations_store: DbRelationsStore, -} - -#[derive(Clone, Default)] -pub struct GhostDagStoreConfig { - pub block_level: u8, - pub cache_size: u64, -} - -#[derive(Clone, Default)] -pub struct HeaderStoreConfig { - pub cache_size: u64, -} - -#[derive(Clone, Default)] -pub struct ReachabilityStoreConfig { - pub cache_size: u64, -} - -#[derive(Clone, Default)] -pub struct RelationsStoreConfig { - pub block_level: u8, - pub cache_size: u64, -} - -#[derive(Clone, Default)] -pub struct FlexiDagStorageConfig { - pub parallelism: u64, - pub gds_conf: GhostDagStoreConfig, - pub hs_conf: HeaderStoreConfig, - pub rbs_conf: ReachabilityStoreConfig, - pub rs_conf: RelationsStoreConfig, -} - -impl FlexiDagStorageConfig { - pub fn new() -> Self { - FlexiDagStorageConfig::default() - } - - pub fn create_with_params(parallelism: u64, block_level: u8, cache_size: u64) -> Self { - Self { - parallelism, - gds_conf: GhostDagStoreConfig { - block_level, - cache_size, - }, - hs_conf: HeaderStoreConfig { cache_size }, - rbs_conf: ReachabilityStoreConfig { cache_size }, - rs_conf: RelationsStoreConfig { - block_level, - cache_size, - }, - 
} - } - - pub fn update_parallelism(mut self, parallelism: u64) -> Self { - self.parallelism = parallelism; - self - } - - pub fn update_ghost_dag_conf(mut self, gds_conf: GhostDagStoreConfig) -> Self { - self.gds_conf = gds_conf; - self - } - - pub fn update_headers_conf(mut self, hs_conf: HeaderStoreConfig) -> Self { - self.hs_conf = hs_conf; - self - } - - pub fn update_reachability_conf(mut self, rbs_conf: ReachabilityStoreConfig) -> Self { - self.rbs_conf = rbs_conf; - self - } - - pub fn update_relations_conf(mut self, rs_conf: RelationsStoreConfig) -> Self { - self.rs_conf = rs_conf; - self - } -} - -impl FlexiDagStorage { - /// Creates or loads an existing storage from the provided directory path. - pub fn create_from_path<P: AsRef<Path>>( - db_path: P, - config: FlexiDagStorageConfig, - ) -> Result<Self, StoreError> { - let rocksdb_config = RocksdbConfig { - parallelism: config.parallelism, - ..Default::default() - }; - - let db = Arc::new( - DBStorage::open_with_cfs( - db_path, - vec![ - // consensus headers - HEADERS_STORE_CF, - COMPACT_HEADER_DATA_STORE_CF, - // consensus relations - PARENTS_CF, - CHILDREN_CF, - // consensus reachability - REACHABILITY_DATA_CF, - // consensus ghostdag - GHOST_DAG_STORE_CF, - COMPACT_GHOST_DAG_STORE_CF, - ], - false, - rocksdb_config, - None, - ) - .map_err(|e| StoreError::DBIoError(e.to_string()))?, - ); - - Ok(Self { - ghost_dag_store: DbGhostdagStore::new( - db.clone(), - config.gds_conf.block_level, - config.gds_conf.cache_size, - ), - - header_store: DbHeadersStore::new(db.clone(), config.hs_conf.cache_size), - reachability_store: DbReachabilityStore::new(db.clone(), config.rbs_conf.cache_size), - relations_store: DbRelationsStore::new( - db, - config.rs_conf.block_level, - config.rs_conf.cache_size, - ), - }) - } -} diff --git a/flexidag/src/consensusdb/error.rs b/flexidag/src/consensusdb/error.rs deleted file mode 100644 index ff2c199c93..0000000000 --- a/flexidag/src/consensusdb/error.rs +++ /dev/null @@ -1,58 +0,0 @@ -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum StoreError { - #[error("key {0} not found in store")] - KeyNotFound(String), - - #[error("key {0} already exists in store")] - KeyAlreadyExists(String), - - #[error("column family {0} not exist in db")] - CFNotExist(String), - - #[error("IO error {0}")] - DBIoError(String), - - #[error("rocksdb error {0}")] - DbError(#[from] rocksdb::Error), - - #[error("encode error {0}")] - EncodeError(String), - - #[error("decode error {0}")] - DecodeError(String), - - #[error("ghostdag {0} duplicate blocks")] - DAGDupBlocksError(String), -} - -pub type StoreResult<T> = std::result::Result<T, StoreError>; - -pub trait StoreResultExtensions<T> { - fn unwrap_option(self) -> Option<T>; -} - -impl<T> StoreResultExtensions<T> for StoreResult<T> { - fn unwrap_option(self) -> Option<T> { - match self { - Ok(value) => Some(value), - Err(StoreError::KeyNotFound(_)) => None, - Err(err) => panic!("Unexpected store error: {err:?}"), - } - } -} - -pub trait StoreResultEmptyTuple { - fn unwrap_and_ignore_key_already_exists(self); -} - -impl StoreResultEmptyTuple for StoreResult<()> { - fn unwrap_and_ignore_key_already_exists(self) { - match self { - Ok(_) => (), - Err(StoreError::KeyAlreadyExists(_)) => (), - Err(err) => panic!("Unexpected store error: {err:?}"), - } - } -} diff --git a/flexidag/src/consensusdb/item.rs b/flexidag/src/consensusdb/item.rs deleted file mode 100644 index 0d27b9c347..0000000000 --- a/flexidag/src/consensusdb/item.rs +++ /dev/null @@ -1,81 +0,0 @@ -use 
super::prelude::DbWriter; -use super::schema::{KeyCodec, Schema, ValueCodec}; -use super::{db::DBStorage, error::StoreError}; -use parking_lot::RwLock; -use starcoin_storage::storage::RawDBStorage; -use std::sync::Arc; - -/// A cached DB item with concurrency support -#[derive(Clone)] -pub struct CachedDbItem<S: Schema> { - db: Arc<DBStorage>, - key: S::Key, - cached_item: Arc<RwLock<Option<S::Value>>>, -} - -impl<S: Schema> CachedDbItem<S> { - pub fn new(db: Arc<DBStorage>, key: S::Key) -> Self { - Self { - db, - key, - cached_item: Arc::new(RwLock::new(None)), - } - } - - pub fn read(&self) -> Result<S::Value, StoreError> { - if let Some(item) = self.cached_item.read().clone() { - return Ok(item); - } - if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - { - let item = S::Value::decode_value(&slice)?; - *self.cached_item.write() = Some(item.clone()); - Ok(item) - } else { - Err(StoreError::KeyNotFound( - String::from_utf8(self.key.encode_key()?) - .unwrap_or(("unrecoverable key string").to_string()), - )) - } - } - - pub fn write(&mut self, mut writer: impl DbWriter, item: &S::Value) -> Result<(), StoreError> { - *self.cached_item.write() = Some(item.clone()); - writer.put::<S>(&self.key, item)?; - Ok(()) - } - - pub fn remove(&mut self, mut writer: impl DbWriter) -> Result<(), StoreError> -where { - *self.cached_item.write() = None; - writer.delete::<S>(&self.key)?; - Ok(()) - } - - pub fn update<F>(&mut self, mut writer: impl DbWriter, op: F) -> Result<S::Value, StoreError> - where - F: Fn(S::Value) -> S::Value, - { - let mut guard = self.cached_item.write(); - let mut item = if let Some(item) = guard.take() { - item - } else if let Some(slice) = self - .db - .raw_get_pinned_cf(S::COLUMN_FAMILY, &self.key.encode_key()?) - .map_err(|_| StoreError::CFNotExist(S::COLUMN_FAMILY.to_string()))? - { - let item = S::Value::decode_value(&slice)?; - item - } else { - return Err(StoreError::KeyNotFound("".to_string())); - }; - - item = op(item); // Apply the update op - *guard = Some(item.clone()); - writer.put::<S>(&self.key, &item)?; - Ok(item) - } -} diff --git a/flexidag/src/consensusdb/mod.rs b/flexidag/src/consensusdb/mod.rs deleted file mode 100644 index 5aaa7c6ef2..0000000000 --- a/flexidag/src/consensusdb/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -mod access; -mod cache; -mod consensus_ghostdag; -mod consensus_header; -mod consensus_reachability; -pub mod consensus_relations; -mod db; -mod error; -mod item; -pub mod schema; -mod writer; - -pub mod prelude { - use super::{db, error}; - - pub use super::{ - access::CachedDbAccess, - cache::DagCache, - item::CachedDbItem, - writer::{BatchDbWriter, DbWriter, DirectDbWriter}, - }; - pub use db::{FlexiDagStorage, FlexiDagStorageConfig}; - pub use error::{StoreError, StoreResult, StoreResultEmptyTuple, StoreResultExtensions}; -} - -pub mod schemadb { - pub use super::{ - consensus_ghostdag::*, consensus_header::*, consensus_reachability::*, - consensus_relations::*, - }; -} diff --git a/flexidag/src/consensusdb/schema.rs b/flexidag/src/consensusdb/schema.rs deleted file mode 100644 index ad1bbc072f..0000000000 --- a/flexidag/src/consensusdb/schema.rs +++ /dev/null @@ -1,40 +0,0 @@ -use super::error::StoreError; -use core::hash::Hash; -use std::fmt::Debug; -use std::result::Result; - -pub trait KeyCodec<S: Schema + ?Sized>: Clone + Sized + Debug + Send + Sync { - /// Converts `self` to bytes to be stored in DB. 
- fn encode_key(&self) -> Result<Vec<u8>, StoreError>; - /// Converts bytes fetched from DB to `Self`. - fn decode_key(data: &[u8]) -> Result<Self, StoreError>; -} - -pub trait ValueCodec<S: Schema + ?Sized>: Clone + Sized + Debug + Send + Sync { - /// Converts `self` to bytes to be stored in DB. - fn encode_value(&self) -> Result<Vec<u8>, StoreError>; - /// Converts bytes fetched from DB to `Self`. - fn decode_value(data: &[u8]) -> Result<Self, StoreError>; -} - -pub trait Schema: Debug + Send + Sync + 'static { - const COLUMN_FAMILY: &'static str; - - type Key: KeyCodec<Self> + Hash + Eq + Default; - type Value: ValueCodec<Self> + Default + Clone; -} - -#[macro_export] -macro_rules! define_schema { - ($schema_type: ident, $key_type: ty, $value_type: ty, $cf_name: expr) => { - #[derive(Clone, Debug)] - pub(crate) struct $schema_type; - - impl $crate::schema::Schema for $schema_type { - type Key = $key_type; - type Value = $value_type; - - const COLUMN_FAMILY: &'static str = $cf_name; - } - }; -} diff --git a/flexidag/src/consensusdb/writer.rs b/flexidag/src/consensusdb/writer.rs deleted file mode 100644 index 717d7d7e1c..0000000000 --- a/flexidag/src/consensusdb/writer.rs +++ /dev/null @@ -1,75 +0,0 @@ -use rocksdb::WriteBatch; -use starcoin_storage::storage::InnerStore; - -use super::schema::{KeyCodec, Schema, ValueCodec}; -use super::{db::DBStorage, error::StoreError}; - -/// Abstraction over direct/batched DB writing -pub trait DbWriter { - fn put<S: Schema>(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError>; - fn delete<S: Schema>(&mut self, key: &S::Key) -> Result<(), StoreError>; -} - -pub struct DirectDbWriter<'a> { - db: &'a DBStorage, -} - -impl<'a> DirectDbWriter<'a> { - pub fn new(db: &'a DBStorage) -> Self { - Self { db } - } -} - -impl DbWriter for DirectDbWriter<'_> { - fn put<S: Schema>(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - let bin_key = key.encode_key()?; - let bin_data = value.encode_value()?; - self.db - .put(S::COLUMN_FAMILY, bin_key, bin_data) - .map_err(|e| StoreError::DBIoError(e.to_string())) - } - - fn delete<S: Schema>(&mut self, key: &S::Key) -> Result<(), StoreError> { - let key = key.encode_key()?; - self.db - .remove(S::COLUMN_FAMILY, key) - .map_err(|e| StoreError::DBIoError(e.to_string())) - } -} - -pub struct BatchDbWriter<'a> { - batch: &'a mut WriteBatch, -} - -impl<'a> BatchDbWriter<'a> { - pub fn new(batch: &'a mut WriteBatch) -> Self { - Self { batch } - } -} - -impl DbWriter for BatchDbWriter<'_> { - fn put<S: Schema>(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - let key = key.encode_key()?; - let value = value.encode_value()?; - self.batch.put(key, value); - Ok(()) - } - - fn delete<S: Schema>(&mut self, key: &S::Key) -> Result<(), StoreError> { - let key = key.encode_key()?; - self.batch.delete(key); - Ok(()) - } -} - -impl<T: DbWriter> DbWriter for &mut T { - #[inline] - fn put<S: Schema>(&mut self, key: &S::Key, value: &S::Value) -> Result<(), StoreError> { - (*self).put::<S>(key, value) - } - - #[inline] - fn delete<S: Schema>(&mut self, key: &S::Key) -> Result<(), StoreError> { - (*self).delete::<S>(key) - } -} diff --git a/flexidag/src/dag/ghostdag/mergeset.rs b/flexidag/src/dag/ghostdag/mergeset.rs deleted file mode 100644 index 79aefe2db7..0000000000 --- a/flexidag/src/dag/ghostdag/mergeset.rs +++ /dev/null @@ -1,71 +0,0 @@ -use super::protocol::GhostdagManager; -use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; -use 
crate::dag::reachability::reachability_service::ReachabilityService; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashSet; -use std::collections::VecDeque; - -impl< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, - > GhostdagManager<T, S, U, V> -{ - pub fn ordered_mergeset_without_selected_parent( - &self, - selected_parent: Hash, - parents: &[Hash], - ) -> Vec<Hash> { - self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) - } - - pub fn unordered_mergeset_without_selected_parent( - &self, - selected_parent: Hash, - parents: &[Hash], - ) -> BlockHashSet { - let mut queue: VecDeque<_> = parents - .iter() - .copied() - .filter(|p| p != &selected_parent) - .collect(); - let mut mergeset: BlockHashSet = queue.iter().copied().collect(); - let mut selected_parent_past = BlockHashSet::new(); - - while let Some(current) = queue.pop_front() { - let current_parents = self - .relations_store - .get_parents(current) - .unwrap_or_else(|err| { - println!("WUT"); - panic!("{err:?}"); - }); - - // For each parent of the current block we check whether it is in the past of the selected parent. If not, - // we add it to the resulting merge-set and queue it for further processing. - for parent in current_parents.iter() { - if mergeset.contains(parent) { - continue; - } - - if selected_parent_past.contains(parent) { - continue; - } - - if self - .reachability_service - .is_dag_ancestor_of(*parent, selected_parent) - { - selected_parent_past.insert(*parent); - continue; - } - - mergeset.insert(*parent); - queue.push_back(*parent); - } - } - - mergeset - } -} diff --git a/flexidag/src/dag/ghostdag/mod.rs b/flexidag/src/dag/ghostdag/mod.rs deleted file mode 100644 index 51a2c8fc82..0000000000 --- a/flexidag/src/dag/ghostdag/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod mergeset; -pub mod protocol; - -mod util; diff --git a/flexidag/src/dag/ghostdag/protocol.rs b/flexidag/src/dag/ghostdag/protocol.rs deleted file mode 100644 index 9afc86d3bd..0000000000 --- a/flexidag/src/dag/ghostdag/protocol.rs +++ /dev/null @@ -1,338 +0,0 @@ -use super::util::Refs; -use crate::consensusdb::schemadb::{GhostdagStoreReader, HeaderStoreReader, RelationsStoreReader}; -use crate::dag::reachability::reachability_service::ReachabilityService; -use crate::dag::types::{ghostdata::GhostdagData, ordering::*}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{ - self, BlockHashExtensions, BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType, -}; -use std::sync::Arc; -// For GhostdagStoreReader-related functions, use GhostDagDataWrapper instead. 
-// ascending_mergeset_without_selected_parent -// descending_mergeset_without_selected_parent -// consensus_ordered_mergeset -// consensus_ordered_mergeset_without_selected_parent -//use dag_database::consensus::GhostDagDataWrapper; - -#[derive(Clone)] -pub struct GhostdagManager< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, -> { - genesis_hash: Hash, - pub(super) k: KType, - pub(super) ghostdag_store: T, - pub(super) relations_store: S, - pub(super) headers_store: V, - pub(super) reachability_service: U, -} - -impl< - T: GhostdagStoreReader, - S: RelationsStoreReader, - U: ReachabilityService, - V: HeaderStoreReader, - > GhostdagManager<T, S, U, V> -{ - pub fn new( - genesis_hash: Hash, - k: KType, - ghostdag_store: T, - relations_store: S, - headers_store: V, - reachability_service: U, - ) -> Self { - Self { - genesis_hash, - k, - ghostdag_store, - relations_store, - reachability_service, - headers_store, - } - } - - pub fn genesis_ghostdag_data(&self) -> GhostdagData { - GhostdagData::new( - 0, - Default::default(), // TODO: take blue score and work from actual genesis - Hash::new(blockhash::ORIGIN), - BlockHashes::new(Vec::new()), - BlockHashes::new(Vec::new()), - HashKTypeMap::new(BlockHashMap::new()), - ) - } - - pub fn origin_ghostdag_data(&self) -> Arc<GhostdagData> { - Arc::new(GhostdagData::new( - 0, - Default::default(), - 0.into(), - BlockHashes::new(Vec::new()), - BlockHashes::new(Vec::new()), - HashKTypeMap::new(BlockHashMap::new()), - )) - } - - pub fn find_selected_parent(&self, parents: impl IntoIterator<Item = Hash>) -> Hash { - parents - .into_iter() - .map(|parent| SortableBlock { - hash: parent, - blue_work: self.ghostdag_store.get_blue_work(parent).unwrap(), - }) - .max() - .unwrap() - .hash - } - - /// Runs the GHOSTDAG protocol and calculates the block GhostdagData by the given parents. - /// The function calculates mergeset blues by iterating over the blocks in - /// the anticone of the new block selected parent (which is the parent with the - /// highest blue work) and adds any block to the blue set if by adding - /// it these conditions will not be violated: - /// - /// 1) |anticone-of-candidate-block ∩ blue-set-of-new-block| ≤ K - /// - /// 2) For every blue block in blue-set-of-new-block: - /// |(anticone-of-blue-block ∩ blue-set-new-block) ∪ {candidate-block}| ≤ K. - /// We validate this condition by maintaining a map blues_anticone_sizes for - /// each block which holds all the blue anticone sizes that were affected by - /// the new added blue blocks. - /// So to find out what is |anticone-of-blue ∩ blue-set-of-new-block| we just iterate in - /// the selected parent chain of the new block until we find an existing entry in - /// blues_anticone_sizes. 
-    ///
-    /// For further details see the article https://eprint.iacr.org/2018/104.pdf
-    pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData {
-        assert!(
-            !parents.is_empty(),
-            "genesis must be added via a call to init"
-        );
-
-        // Run the GHOSTDAG parent selection algorithm
-        let selected_parent = self.find_selected_parent(&mut parents.iter().copied());
-        // Initialize new GHOSTDAG block data with the selected parent
-        let mut new_block_data = GhostdagData::new_with_selected_parent(selected_parent, self.k);
-        // Get the mergeset in consensus-agreed topological order (topological here means forward in time from blocks to children)
-        let ordered_mergeset =
-            self.ordered_mergeset_without_selected_parent(selected_parent, parents);
-
-        for blue_candidate in ordered_mergeset.iter().cloned() {
-            let coloring = self.check_blue_candidate(&new_block_data, blue_candidate);
-
-            if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring {
-                // No k-cluster violation found, we can now set the candidate block as blue
-                new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes);
-            } else {
-                new_block_data.add_red(blue_candidate);
-            }
-        }
-
-        let blue_score = self
-            .ghostdag_store
-            .get_blue_score(selected_parent)
-            .unwrap()
-            .checked_add(new_block_data.mergeset_blues.len() as u64)
-            .unwrap();
-
-        let added_blue_work: BlueWorkType = new_block_data
-            .mergeset_blues
-            .iter()
-            .cloned()
-            .map(|hash| {
-                if hash.is_origin() {
-                    0u128
-                } else {
-                    // TODO: implement PoW work calculation
-                    let _difficulty = self.headers_store.get_difficulty(hash).unwrap();
-                    1024u128
-                }
-            })
-            .sum();
-
-        let blue_work = self
-            .ghostdag_store
-            .get_blue_work(selected_parent)
-            .unwrap()
-            .checked_add(added_blue_work)
-            .unwrap();
-        new_block_data.finalize_score_and_work(blue_score, blue_work);
-
-        new_block_data
-    }
-
-    fn check_blue_candidate_with_chain_block(
-        &self,
-        new_block_data: &GhostdagData,
-        chain_block: &ChainBlock,
-        blue_candidate: Hash,
-        candidate_blues_anticone_sizes: &mut BlockHashMap<KType>,
-        candidate_blue_anticone_size: &mut KType,
-    ) -> ColoringState {
-        // If blue_candidate is in the future of chain_block, it means
-        // that all remaining blues are in the past of chain_block and thus
-        // in the past of blue_candidate. In this case we know for sure that
-        // the anticone of blue_candidate will not exceed K, and we can mark
-        // it as blue.
-        //
-        // The new block is always in the future of blue_candidate, so there's
-        // no point in checking it.
-
-        // We check if chain_block is not the new block by checking if it has a hash.
-        if let Some(hash) = chain_block.hash {
-            if self
-                .reachability_service
-                .is_dag_ancestor_of(hash, blue_candidate)
-            {
-                return ColoringState::Blue;
-            }
-        }
-
-        for &block in chain_block.data.mergeset_blues.iter() {
-            // Skip blocks that exist in the past of blue_candidate.
- if self - .reachability_service - .is_dag_ancestor_of(block, blue_candidate) - { - continue; - } - - candidate_blues_anticone_sizes - .insert(block, self.blue_anticone_size(block, new_block_data)); - - *candidate_blue_anticone_size = (*candidate_blue_anticone_size).checked_add(1).unwrap(); - if *candidate_blue_anticone_size > self.k { - // k-cluster violation: The candidate's blue anticone exceeded k - return ColoringState::Red; - } - - if *candidate_blues_anticone_sizes.get(&block).unwrap() == self.k { - // k-cluster violation: A block in candidate's blue anticone already - // has k blue blocks in its own anticone - return ColoringState::Red; - } - - // This is a sanity check that validates that a blue - // block's blue anticone is not already larger than K. - assert!( - *candidate_blues_anticone_sizes.get(&block).unwrap() <= self.k, - "found blue anticone larger than K" - ); - } - - ColoringState::Pending - } - - /// Returns the blue anticone size of `block` from the worldview of `context`. - /// Expects `block` to be in the blue set of `context` - fn blue_anticone_size(&self, block: Hash, context: &GhostdagData) -> KType { - let mut current_blues_anticone_sizes = HashKTypeMap::clone(&context.blues_anticone_sizes); - let mut current_selected_parent = context.selected_parent; - loop { - if let Some(size) = current_blues_anticone_sizes.get(&block) { - return *size; - } - - if current_selected_parent == self.genesis_hash - || current_selected_parent == Hash::new(blockhash::ORIGIN) - { - panic!("block {block} is not in blue set of the given context"); - } - - current_blues_anticone_sizes = self - .ghostdag_store - .get_blues_anticone_sizes(current_selected_parent) - .unwrap(); - current_selected_parent = self - .ghostdag_store - .get_selected_parent(current_selected_parent) - .unwrap(); - } - } - - pub fn check_blue_candidate( - &self, - new_block_data: &GhostdagData, - blue_candidate: Hash, - ) -> ColoringOutput { - // The maximum length of new_block_data.mergeset_blues can be K+1 because - // it contains the selected parent. - if new_block_data.mergeset_blues.len() as KType == self.k.checked_add(1).unwrap() { - return ColoringOutput::Red; - } - - let mut candidate_blues_anticone_sizes: BlockHashMap<KType> = - BlockHashMap::with_capacity(self.k as usize); - // Iterate over all blocks in the blue past of the new block that are not in the past - // of blue_candidate, and check for each one of them if blue_candidate potentially - // enlarges their blue anticone to be over K, or that they enlarge the blue anticone - // of blue_candidate to be over K. 
- let mut chain_block = ChainBlock { - hash: None, - data: new_block_data.into(), - }; - let mut candidate_blue_anticone_size: KType = 0; - - loop { - let state = self.check_blue_candidate_with_chain_block( - new_block_data, - &chain_block, - blue_candidate, - &mut candidate_blues_anticone_sizes, - &mut candidate_blue_anticone_size, - ); - - match state { - ColoringState::Blue => { - return ColoringOutput::Blue( - candidate_blue_anticone_size, - candidate_blues_anticone_sizes, - ) - } - ColoringState::Red => return ColoringOutput::Red, - ColoringState::Pending => (), // continue looping - } - - chain_block = ChainBlock { - hash: Some(chain_block.data.selected_parent), - data: self - .ghostdag_store - .get_data(chain_block.data.selected_parent) - .unwrap() - .into(), - } - } - } - - pub fn sort_blocks(&self, blocks: impl IntoIterator<Item = Hash>) -> Vec<Hash> { - let mut sorted_blocks: Vec<Hash> = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| SortableBlock { - hash: *block, - blue_work: self.ghostdag_store.get_blue_work(*block).unwrap(), - }); - sorted_blocks - } -} - -/// Chain block with attached ghostdag data -struct ChainBlock<'a> { - hash: Option<Hash>, // if set to `None`, signals being the new block - data: Refs<'a, GhostdagData>, -} - -/// Represents the intermediate GHOSTDAG coloring state for the current candidate -enum ColoringState { - Blue, - Red, - Pending, -} - -#[derive(Debug)] -/// Represents the final output of GHOSTDAG coloring for the current candidate -pub enum ColoringOutput { - Blue(KType, BlockHashMap<KType>), // (blue anticone size, map of blue anticone sizes for each affected blue) - Red, -} diff --git a/flexidag/src/dag/ghostdag/util.rs b/flexidag/src/dag/ghostdag/util.rs deleted file mode 100644 index 68eb4b9b31..0000000000 --- a/flexidag/src/dag/ghostdag/util.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::{ops::Deref, rc::Rc, sync::Arc}; -/// Enum used to represent a concrete varying pointer type which only needs to be accessed by ref. 
-/// We avoid adding a `Val(T)` variant in order to keep the size of the enum minimal -pub enum Refs<'a, T> { - Ref(&'a T), - Arc(Arc<T>), - Rc(Rc<T>), - Box(Box<T>), -} - -impl<T> AsRef<T> for Refs<'_, T> { - fn as_ref(&self) -> &T { - match self { - Refs::Ref(r) => r, - Refs::Arc(a) => a, - Refs::Rc(r) => r, - Refs::Box(b) => b, - } - } -} - -impl<T> Deref for Refs<'_, T> { - type Target = T; - - fn deref(&self) -> &Self::Target { - match self { - Refs::Ref(r) => r, - Refs::Arc(a) => a, - Refs::Rc(r) => r, - Refs::Box(b) => b, - } - } -} - -impl<'a, T> From<&'a T> for Refs<'a, T> { - fn from(r: &'a T) -> Self { - Self::Ref(r) - } -} - -impl<T> From<Arc<T>> for Refs<'_, T> { - fn from(a: Arc<T>) -> Self { - Self::Arc(a) - } -} - -impl<T> From<Rc<T>> for Refs<'_, T> { - fn from(r: Rc<T>) -> Self { - Self::Rc(r) - } -} - -impl<T> From<Box<T>> for Refs<'_, T> { - fn from(b: Box<T>) -> Self { - Self::Box(b) - } -} diff --git a/flexidag/src/dag/mod.rs b/flexidag/src/dag/mod.rs deleted file mode 100644 index dea837bcff..0000000000 --- a/flexidag/src/dag/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod ghostdag; -pub mod reachability; -pub mod types; diff --git a/flexidag/src/dag/reachability/extensions.rs b/flexidag/src/dag/reachability/extensions.rs deleted file mode 100644 index 9ea769fb9a..0000000000 --- a/flexidag/src/dag/reachability/extensions.rs +++ /dev/null @@ -1,50 +0,0 @@ -use crate::consensusdb::{prelude::StoreResult, schemadb::ReachabilityStoreReader}; -use crate::dag::types::interval::Interval; -use starcoin_crypto::hash::HashValue as Hash; - -pub(super) trait ReachabilityStoreIntervalExtensions { - fn interval_children_capacity(&self, block: Hash) -> StoreResult<Interval>; - fn interval_remaining_before(&self, block: Hash) -> StoreResult<Interval>; - fn interval_remaining_after(&self, block: Hash) -> StoreResult<Interval>; -} - -impl<T: ReachabilityStoreReader + ?Sized> ReachabilityStoreIntervalExtensions for T { - /// Returns the reachability allocation capacity for children of `block` - fn interval_children_capacity(&self, block: Hash) -> StoreResult<Interval> { - // The interval of a block should *strictly* contain the intervals of its - // tree children, hence we subtract 1 from the end of the range. 
- Ok(self.get_interval(block)?.decrease_end(1)) - } - - /// Returns the available interval to allocate for tree children, taken from the - /// beginning of children allocation capacity - fn interval_remaining_before(&self, block: Hash) -> StoreResult<Interval> { - let alloc_capacity = self.interval_children_capacity(block)?; - match self.get_children(block)?.first() { - Some(first_child) => { - let first_alloc = self.get_interval(*first_child)?; - Ok(Interval::new( - alloc_capacity.start, - first_alloc.start.checked_sub(1).unwrap(), - )) - } - None => Ok(alloc_capacity), - } - } - - /// Returns the available interval to allocate for tree children, taken from the - /// end of children allocation capacity - fn interval_remaining_after(&self, block: Hash) -> StoreResult<Interval> { - let alloc_capacity = self.interval_children_capacity(block)?; - match self.get_children(block)?.last() { - Some(last_child) => { - let last_alloc = self.get_interval(*last_child)?; - Ok(Interval::new( - last_alloc.end.checked_add(1).unwrap(), - alloc_capacity.end, - )) - } - None => Ok(alloc_capacity), - } - } -} diff --git a/flexidag/src/dag/reachability/inquirer.rs b/flexidag/src/dag/reachability/inquirer.rs deleted file mode 100644 index 022a71074b..0000000000 --- a/flexidag/src/dag/reachability/inquirer.rs +++ /dev/null @@ -1,345 +0,0 @@ -use super::{tree::*, *}; -use crate::consensusdb::schemadb::{ReachabilityStore, ReachabilityStoreReader}; -use crate::dag::types::{interval::Interval, perf}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash; - -/// Init the reachability store to match the state required by the algorithmic layer. -/// The function first checks the store for possibly being initialized already. -pub fn init(store: &mut (impl ReachabilityStore + ?Sized)) -> Result<()> { - init_with_params(store, Hash::new(blockhash::ORIGIN), Interval::maximal()) -} - -pub(super) fn init_with_params( - store: &mut (impl ReachabilityStore + ?Sized), - origin: Hash, - capacity: Interval, -) -> Result<()> { - if store.has(origin)? { - return Ok(()); - } - store.init(origin, capacity)?; - Ok(()) -} - -type HashIterator<'a> = &'a mut dyn Iterator<Item = Hash>; - -/// Add a block to the DAG reachability data structures and persist using the provided `store`. 
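Before the `add_block` entry point documented immediately above, it is worth spelling out the single trick this module rests on: each block owns a numeric interval that (strictly) contains the intervals of all its tree descendants, so the chain-ancestry queries defined further below (`is_chain_ancestor_of`, `is_strict_chain_ancestor_of`) reduce to two integer comparisons. A tiny model of that containment test (a hypothetical standalone `Interval`, shaped after `dag::types::interval::Interval`):

// Hypothetical standalone Interval, shaped after dag::types::interval::Interval.
#[derive(Clone, Copy, Debug)]
struct Interval {
    start: u64,
    end: u64,
}

impl Interval {
    // Holds when the owner of `self` is a chain ancestor of the owner of `other`
    // (graph convention: a block is also an ancestor of itself).
    fn contains(self, other: Interval) -> bool {
        self.start <= other.start && other.end <= self.end
    }

    // The strict variant additionally rules out equal intervals.
    fn strictly_contains(self, other: Interval) -> bool {
        self.start <= other.start && other.end < self.end
    }
}

fn main() {
    let root = Interval { start: 1, end: 15 }; // capacity allocated to the root
    let child = Interval { start: 1, end: 7 }; // a sub-range handed to one child
    assert!(root.contains(child)); // root is a chain ancestor of child
    assert!(root.strictly_contains(child));
    assert!(!child.contains(root)); // ancestry is not symmetric
}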
-pub fn add_block(
-    store: &mut (impl ReachabilityStore + ?Sized),
-    new_block: Hash,
-    selected_parent: Hash,
-    mergeset_iterator: HashIterator,
-) -> Result<()> {
-    add_block_with_params(
-        store,
-        new_block,
-        selected_parent,
-        mergeset_iterator,
-        None,
-        None,
-    )
-}
-
-fn add_block_with_params(
-    store: &mut (impl ReachabilityStore + ?Sized),
-    new_block: Hash,
-    selected_parent: Hash,
-    mergeset_iterator: HashIterator,
-    reindex_depth: Option<u64>,
-    reindex_slack: Option<u64>,
-) -> Result<()> {
-    add_tree_block(
-        store,
-        new_block,
-        selected_parent,
-        reindex_depth.unwrap_or(perf::DEFAULT_REINDEX_DEPTH),
-        reindex_slack.unwrap_or(perf::DEFAULT_REINDEX_SLACK),
-    )?;
-    add_dag_block(store, new_block, mergeset_iterator)?;
-    Ok(())
-}
-
-fn add_dag_block(
-    store: &mut (impl ReachabilityStore + ?Sized),
-    new_block: Hash,
-    mergeset_iterator: HashIterator,
-) -> Result<()> {
-    // Update the future covering set for blocks in the mergeset
-    for merged_block in mergeset_iterator {
-        insert_to_future_covering_set(store, merged_block, new_block)?;
-    }
-    Ok(())
-}
-
-fn insert_to_future_covering_set(
-    store: &mut (impl ReachabilityStore + ?Sized),
-    merged_block: Hash,
-    new_block: Hash,
-) -> Result<()> {
-    match binary_search_descendant(
-        store,
-        store.get_future_covering_set(merged_block)?.as_slice(),
-        new_block,
-    )? {
-        // We expect the query to not succeed, and to only return the correct insertion index.
-        // The existence of a `future covering item` (`FCI`) which is a chain ancestor of `new_block`
-        // contradicts `merged_block ∈ mergeset(new_block)`. Similarly, the existence of an FCI
-        // which `new_block` is a chain ancestor of, contradicts the processing order.
-        SearchOutput::Found(_, _) => Err(ReachabilityError::DataInconsistency),
-        SearchOutput::NotFound(i) => {
-            store.insert_future_covering_item(merged_block, new_block, i)?;
-            Ok(())
-        }
-    }
-}
-
-/// Hint to the reachability algorithm that `hint` is a candidate to become
-/// the `virtual selected parent` (`VSP`). This might affect internal reachability heuristics such
-/// as moving the reindex point. The consensus runtime is expected to call this function
-/// for a new header selected tip which is `header only` / `pending UTXO verification`, or for a completely resolved `VSP`.
-pub fn hint_virtual_selected_parent(
-    store: &mut (impl ReachabilityStore + ?Sized),
-    hint: Hash,
-) -> Result<()> {
-    try_advancing_reindex_root(
-        store,
-        hint,
-        perf::DEFAULT_REINDEX_DEPTH,
-        perf::DEFAULT_REINDEX_SLACK,
-    )
-}
-
-/// Checks if the `this` block is a strict chain ancestor of the `queried` block (aka `this ∈ chain(queried)`).
-/// Note that this results in `false` if `this == queried`.
-pub fn is_strict_chain_ancestor_of(
-    store: &(impl ReachabilityStoreReader + ?Sized),
-    this: Hash,
-    queried: Hash,
-) -> Result<bool> {
-    Ok(store
-        .get_interval(this)?
-        .strictly_contains(store.get_interval(queried)?))
-}
-
-/// Checks if `this` block is a chain ancestor of `queried` block (aka `this ∈ chain(queried) ∪ {queried}`).
-/// Note that we use the graph theory convention here which defines that a block is also an ancestor of itself.
-pub fn is_chain_ancestor_of(
-    store: &(impl ReachabilityStoreReader + ?Sized),
-    this: Hash,
-    queried: Hash,
-) -> Result<bool> {
-    Ok(store
-        .get_interval(this)?
-        .contains(store.get_interval(queried)?))
-}
-
-/// Returns true if `this` is a DAG ancestor of `queried` (aka `queried ∈ future(this) ∪ {this}`).
-/// Note: this method will return true if `this == queried`.
-/// The complexity of this method is O(log(|future_covering_set(this)|)) -pub fn is_dag_ancestor_of( - store: &(impl ReachabilityStoreReader + ?Sized), - this: Hash, - queried: Hash, -) -> Result<bool> { - // First, check if `this` is a chain ancestor of queried - if is_chain_ancestor_of(store, this, queried)? { - return Ok(true); - } - // Otherwise, use previously registered future blocks to complete the - // DAG reachability test - match binary_search_descendant( - store, - store.get_future_covering_set(this)?.as_slice(), - queried, - )? { - SearchOutput::Found(_, _) => Ok(true), - SearchOutput::NotFound(_) => Ok(false), - } -} - -/// Finds the child of `ancestor` which is also a chain ancestor of `descendant`. -pub fn get_next_chain_ancestor( - store: &(impl ReachabilityStoreReader + ?Sized), - descendant: Hash, - ancestor: Hash, -) -> Result<Hash> { - if descendant == ancestor { - // The next ancestor does not exist - return Err(ReachabilityError::BadQuery); - } - if !is_strict_chain_ancestor_of(store, ancestor, descendant)? { - // `ancestor` isn't actually a chain ancestor of `descendant`, so by definition - // we cannot find the next ancestor either - return Err(ReachabilityError::BadQuery); - } - - get_next_chain_ancestor_unchecked(store, descendant, ancestor) -} - -/// Note: it is important to keep the unchecked version for internal module use, -/// since in some scenarios during reindexing `descendant` might have a modified -/// interval which was not propagated yet. -pub(super) fn get_next_chain_ancestor_unchecked( - store: &(impl ReachabilityStoreReader + ?Sized), - descendant: Hash, - ancestor: Hash, -) -> Result<Hash> { - match binary_search_descendant(store, store.get_children(ancestor)?.as_slice(), descendant)? { - SearchOutput::Found(hash, _) => Ok(hash), - SearchOutput::NotFound(_) => Err(ReachabilityError::BadQuery), - } -} - -enum SearchOutput { - NotFound(usize), // `usize` is the position to insert at - Found(Hash, usize), -} - -fn binary_search_descendant( - store: &(impl ReachabilityStoreReader + ?Sized), - ordered_hashes: &[Hash], - descendant: Hash, -) -> Result<SearchOutput> { - if cfg!(debug_assertions) { - // This is a linearly expensive assertion, keep it debug only - assert_hashes_ordered(store, ordered_hashes); - } - - // `Interval::end` represents the unique number allocated to this block - let point = store.get_interval(descendant)?.end; - - // We use an `unwrap` here since otherwise we need to implement `binary_search` - // ourselves, which is not worth the effort given that this would be an unrecoverable - // error anyhow - match ordered_hashes.binary_search_by_key(&point, |c| store.get_interval(*c).unwrap().start) { - Ok(i) => Ok(SearchOutput::Found(ordered_hashes[i], i)), - Err(i) => { - // `i` is where `point` was expected (i.e., point < ordered_hashes[i].interval.start), - // so we expect `ordered_hashes[i - 1].interval` to be the only candidate to contain `point` - if i > 0 - && is_chain_ancestor_of( - store, - ordered_hashes[i.checked_sub(1).unwrap()], - descendant, - )?
- { - Ok(SearchOutput::Found( - ordered_hashes[i.checked_sub(1).unwrap()], - i.checked_sub(1).unwrap(), - )) - } else { - Ok(SearchOutput::NotFound(i)) - } - } - } -} - -fn assert_hashes_ordered(store: &(impl ReachabilityStoreReader + ?Sized), ordered_hashes: &[Hash]) { - let intervals: Vec<Interval> = ordered_hashes - .iter() - .cloned() - .map(|c| store.get_interval(c).unwrap()) - .collect(); - debug_assert!(intervals - .as_slice() - .windows(2) - .all(|w| w[0].end < w[1].start)) -} - -#[cfg(test)] -mod tests { - use super::{super::tests::*, *}; - use crate::consensusdb::schemadb::MemoryReachabilityStore; - use starcoin_types::blockhash::ORIGIN; - - #[test] - fn test_add_tree_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - // Act - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 15)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()) - .add_block(9.into(), 6.into()) - .add_block(10.into(), 6.into()) - .add_block(11.into(), 6.into()); - // Assert - store.validate_intervals(root).unwrap(); - } - - #[test] - fn test_add_early_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - - // Act - let root: Hash = Hash::from_u64(1); - let mut builder = TreeBuilder::new_with_params(&mut store, 2, 5); - builder.init_with_params(root, Interval::maximal()); - for i in 2u64..100 { - builder.add_block(Hash::from_u64(i), Hash::from_u64(i / 2)); - } - - // Should trigger an earlier than reindex root allocation - builder.add_block(Hash::from_u64(100), Hash::from_u64(2)); - store.validate_intervals(root).unwrap(); - } - - #[test] - fn test_add_dag_blocks() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - let origin_hash = Hash::new(ORIGIN); - // Act - DagBuilder::new(&mut store) - .init() - .add_block(DagBlock::new(1.into(), vec![origin_hash])) - .add_block(DagBlock::new(2.into(), vec![1.into()])) - .add_block(DagBlock::new(3.into(), vec![1.into()])) - .add_block(DagBlock::new(4.into(), vec![2.into(), 3.into()])) - .add_block(DagBlock::new(5.into(), vec![4.into()])) - .add_block(DagBlock::new(6.into(), vec![1.into()])) - .add_block(DagBlock::new(7.into(), vec![5.into(), 6.into()])) - .add_block(DagBlock::new(8.into(), vec![1.into()])) - .add_block(DagBlock::new(9.into(), vec![1.into()])) - .add_block(DagBlock::new(10.into(), vec![7.into(), 8.into(), 9.into()])) - .add_block(DagBlock::new(11.into(), vec![1.into()])) - .add_block(DagBlock::new(12.into(), vec![11.into(), 10.into()])); - - // Assert intervals - store.validate_intervals(origin_hash).unwrap(); - - // Assert genesis - for i in 2u64..=12 { - assert!(store.in_past_of(1, i)); - } - - // Assert some futures - assert!(store.in_past_of(2, 4)); - assert!(store.in_past_of(2, 5)); - assert!(store.in_past_of(2, 7)); - assert!(store.in_past_of(5, 10)); - assert!(store.in_past_of(6, 10)); - assert!(store.in_past_of(10, 12)); - assert!(store.in_past_of(11, 12)); - - // Assert some anticones - assert!(store.are_anticone(2, 3)); - assert!(store.are_anticone(2, 6)); - assert!(store.are_anticone(3, 6)); - assert!(store.are_anticone(5, 6)); - assert!(store.are_anticone(3, 8)); - assert!(store.are_anticone(11, 2)); - assert!(store.are_anticone(11, 4)); - assert!(store.are_anticone(11, 6)); - assert!(store.are_anticone(11, 9)); - } -} diff --git 
a/flexidag/src/dag/reachability/mod.rs b/flexidag/src/dag/reachability/mod.rs deleted file mode 100644 index ceb2905b03..0000000000 --- a/flexidag/src/dag/reachability/mod.rs +++ /dev/null @@ -1,50 +0,0 @@ -mod extensions; -pub mod inquirer; -pub mod reachability_service; -mod reindex; -pub mod relations_service; - -#[cfg(test)] -mod tests; -mod tree; - -use crate::consensusdb::prelude::StoreError; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum ReachabilityError { - #[error("data store error")] - StoreError(#[from] StoreError), - - #[error("data overflow error")] - DataOverflow(String), - - #[error("data inconsistency error")] - DataInconsistency, - - #[error("query is inconsistent")] - BadQuery, -} - -impl ReachabilityError { - pub fn is_key_not_found(&self) -> bool { - matches!(self, ReachabilityError::StoreError(e) if matches!(e, StoreError::KeyNotFound(_))) - } -} - -pub type Result<T> = std::result::Result<T, ReachabilityError>; - -pub trait ReachabilityResultExtensions<T> { - /// Unwraps the error into `None` if the internal error is `StoreError::KeyNotFound` or panics otherwise - fn unwrap_option(self) -> Option<T>; -} - -impl<T> ReachabilityResultExtensions<T> for Result<T> { - fn unwrap_option(self) -> Option<T> { - match self { - Ok(value) => Some(value), - Err(err) if err.is_key_not_found() => None, - Err(err) => panic!("Unexpected reachability error: {err:?}"), - } - } -} diff --git a/flexidag/src/dag/reachability/reachability_service.rs b/flexidag/src/dag/reachability/reachability_service.rs deleted file mode 100644 index 6b2fa643a7..0000000000 --- a/flexidag/src/dag/reachability/reachability_service.rs +++ /dev/null @@ -1,315 +0,0 @@ -use super::{inquirer, Result}; -use crate::consensusdb::schemadb::ReachabilityStoreReader; -use parking_lot::RwLock; -use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_types::blockhash; -use std::{ops::Deref, sync::Arc}; - -pub trait ReachabilityService { - fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool; - fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool>; - fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool; - fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator<Item = Hash>) -> bool; - fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool; - fn is_any_dag_ancestor_result( - &self, - list: &mut impl Iterator<Item = Hash>, - queried: Hash, - ) -> Result<bool>; - fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; -} - -/// Multi-threaded reachability service imp -#[derive(Clone)] -pub struct MTReachabilityService<T: ReachabilityStoreReader + ?Sized> { - store: Arc<RwLock<T>>, -} - -impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> { - pub fn new(store: Arc<RwLock<T>>) -> Self { - Self { store } - } -} - -impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for MTReachabilityService<T> { - fn is_chain_ancestor_of(&self, this: Hash, queried: Hash) -> bool { - let read_guard = self.store.read(); - inquirer::is_chain_ancestor_of(read_guard.deref(), this, queried).unwrap() - } - - fn is_dag_ancestor_of_result(&self, this: Hash, queried: Hash) -> Result<bool> { - let read_guard = self.store.read(); - inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried) - } - - fn is_dag_ancestor_of(&self, this: Hash, queried: Hash) -> bool { - let read_guard = self.store.read(); - inquirer::is_dag_ancestor_of(read_guard.deref(), this, queried).unwrap() - } - - 
fn is_any_dag_ancestor(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> bool { - let read_guard = self.store.read(); - list.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried).unwrap()) - } - - fn is_any_dag_ancestor_result( - &self, - list: &mut impl Iterator<Item = Hash>, - queried: Hash, - ) -> Result<bool> { - let read_guard = self.store.read(); - for hash in list { - if inquirer::is_dag_ancestor_of(read_guard.deref(), hash, queried)? { - return Ok(true); - } - } - Ok(false) - } - - fn is_dag_ancestor_of_any(&self, this: Hash, queried: &mut impl Iterator<Item = Hash>) -> bool { - let read_guard = self.store.read(); - queried.any(|hash| inquirer::is_dag_ancestor_of(read_guard.deref(), this, hash).unwrap()) - } - - fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash { - let read_guard = self.store.read(); - inquirer::get_next_chain_ancestor(read_guard.deref(), descendant, ancestor).unwrap() - } -} - -impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> { - /// Returns a forward iterator walking up the chain-selection tree from `from_ancestor` - /// to `to_descendant`, where `to_descendant` is included if `inclusive` is set to true. - /// - /// To skip `from_ancestor` simply apply `skip(1)`. - /// - /// The caller is expected to verify that `from_ancestor` is indeed a chain ancestor of - /// `to_descendant`, otherwise the function will panic. - pub fn forward_chain_iterator( - &self, - from_ancestor: Hash, - to_descendant: Hash, - inclusive: bool, - ) -> impl Iterator<Item = Hash> { - ForwardChainIterator::new(self.store.clone(), from_ancestor, to_descendant, inclusive) - } - - /// Returns a backward iterator walking down the selected chain from `from_descendant` - /// to `to_ancestor`, where `to_ancestor` is included if `inclusive` is set to true. - /// - /// To skip `from_descendant` simply apply `skip(1)`. - /// - /// The caller is expected to verify that `to_ancestor` is indeed a chain ancestor of - /// `from_descendant`, otherwise the function will panic. - pub fn backward_chain_iterator( - &self, - from_descendant: Hash, - to_ancestor: Hash, - inclusive: bool, - ) -> impl Iterator<Item = Hash> { - BackwardChainIterator::new(self.store.clone(), from_descendant, to_ancestor, inclusive) - } - - /// Returns the default chain iterator, walking from `from` backward down the - /// selected chain until `virtual genesis` (aka `blockhash::ORIGIN`; exclusive) - pub fn default_backward_chain_iterator(&self, from: Hash) -> impl Iterator<Item = Hash> { - BackwardChainIterator::new( - self.store.clone(), - from, - HashValue::new(blockhash::ORIGIN), - false, - ) - } -} - -/// Iterator design: we currently read-lock at each movement of the iterator. -/// Other options are to keep the read guard throughout the iterator lifetime, or -/// a compromise where the lock is released every constant number of items. 
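The per-step locking design described above can be reduced to a small standalone sketch (assuming the `parking_lot` crate; the iterator and store types here are illustrative, not the deleted crate's):

    use parking_lot::RwLock;
    use std::sync::Arc;

    // Hypothetical iterator that re-acquires the read lock on every step,
    // letting writers interleave between items at the cost of one lock
    // acquisition per item.
    struct SharedIter {
        store: Arc<RwLock<Vec<u64>>>,
        idx: usize,
    }

    impl Iterator for SharedIter {
        type Item = u64;
        fn next(&mut self) -> Option<u64> {
            // The temporary read guard is dropped at the end of this statement.
            let item = self.store.read().get(self.idx).copied();
            self.idx += 1;
            item
        }
    }

Holding the guard for the iterator's whole lifetime would save the repeated acquisitions, but would block writers for as long as the caller keeps iterating.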
-struct BackwardChainIterator<T: ReachabilityStoreReader + ?Sized> { - store: Arc<RwLock<T>>, - current: Option<Hash>, - ancestor: Hash, - inclusive: bool, -} - -impl<T: ReachabilityStoreReader + ?Sized> BackwardChainIterator<T> { - fn new( - store: Arc<RwLock<T>>, - from_descendant: Hash, - to_ancestor: Hash, - inclusive: bool, - ) -> Self { - Self { - store, - current: Some(from_descendant), - ancestor: to_ancestor, - inclusive, - } - } -} - -impl<T: ReachabilityStoreReader + ?Sized> Iterator for BackwardChainIterator<T> { - type Item = Hash; - - fn next(&mut self) -> Option<Self::Item> { - if let Some(current) = self.current { - if current == self.ancestor { - if self.inclusive { - self.current = None; - Some(current) - } else { - self.current = None; - None - } - } else { - debug_assert_ne!(current, HashValue::new(blockhash::NONE)); - let next = self.store.read().get_parent(current).unwrap(); - self.current = Some(next); - Some(current) - } - } else { - None - } - } -} - -struct ForwardChainIterator<T: ReachabilityStoreReader + ?Sized> { - store: Arc<RwLock<T>>, - current: Option<Hash>, - descendant: Hash, - inclusive: bool, -} - -impl<T: ReachabilityStoreReader + ?Sized> ForwardChainIterator<T> { - fn new( - store: Arc<RwLock<T>>, - from_ancestor: Hash, - to_descendant: Hash, - inclusive: bool, - ) -> Self { - Self { - store, - current: Some(from_ancestor), - descendant: to_descendant, - inclusive, - } - } -} - -impl<T: ReachabilityStoreReader + ?Sized> Iterator for ForwardChainIterator<T> { - type Item = Hash; - - fn next(&mut self) -> Option<Self::Item> { - if let Some(current) = self.current { - if current == self.descendant { - if self.inclusive { - self.current = None; - Some(current) - } else { - self.current = None; - None - } - } else { - let next = inquirer::get_next_chain_ancestor( - self.store.read().deref(), - self.descendant, - current, - ) - .unwrap(); - self.current = Some(next); - Some(current) - } - } else { - None - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::consensusdb::schemadb::MemoryReachabilityStore; - use crate::dag::{reachability::tests::TreeBuilder, types::interval::Interval}; - - #[test] - fn test_forward_iterator() { - // Arrange - let mut store = MemoryReachabilityStore::new(); - - // Act - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 15)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()) - .add_block(9.into(), 6.into()) - .add_block(10.into(), 6.into()) - .add_block(11.into(), 6.into()); - - let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); - - // Exclusive - let iter = service.forward_chain_iterator(2.into(), 10.into(), false); - - // Assert - let expected_hashes = [2u64, 3, 5, 6].map(Hash::from); - assert!(expected_hashes.iter().cloned().eq(iter)); - - // Inclusive - let iter = service.forward_chain_iterator(2.into(), 10.into(), true); - - // Assert - let expected_hashes = [2u64, 3, 5, 6, 10].map(Hash::from); - assert!(expected_hashes.iter().cloned().eq(iter)); - - // Compare backward to reversed forward - let forward_iter = service.forward_chain_iterator(2.into(), 10.into(), true); - let backward_iter: Vec<Hash> = service - .backward_chain_iterator(10.into(), 2.into(), true) - .collect(); - assert!(forward_iter.eq(backward_iter.iter().cloned().rev())) - } - - #[test] - fn 
test_iterator_boundaries() { - // Arrange & Act - let mut store = MemoryReachabilityStore::new(); - let root: Hash = 1.into(); - TreeBuilder::new(&mut store) - .init_with_params(root, Interval::new(1, 5)) - .add_block(2.into(), root); - - let service = MTReachabilityService::new(Arc::new(RwLock::new(store))); - - // Asserts - assert!([1u64, 2] - .map(Hash::from) - .iter() - .cloned() - .eq(service.forward_chain_iterator(1.into(), 2.into(), true))); - assert!([1u64] - .map(Hash::from) - .iter() - .cloned() - .eq(service.forward_chain_iterator(1.into(), 2.into(), false))); - assert!([2u64, 1] - .map(Hash::from) - .iter() - .cloned() - .eq(service.backward_chain_iterator(2.into(), root, true))); - assert!([2u64] - .map(Hash::from) - .iter() - .cloned() - .eq(service.backward_chain_iterator(2.into(), root, false))); - assert!(std::iter::once(root).eq(service.backward_chain_iterator(root, root, true))); - assert!(std::iter::empty::<Hash>().eq(service.backward_chain_iterator(root, root, false))); - assert!(std::iter::once(root).eq(service.forward_chain_iterator(root, root, true))); - assert!(std::iter::empty::<Hash>().eq(service.forward_chain_iterator(root, root, false))); - } -} diff --git a/flexidag/src/dag/reachability/reindex.rs b/flexidag/src/dag/reachability/reindex.rs deleted file mode 100644 index 48895b602a..0000000000 --- a/flexidag/src/dag/reachability/reindex.rs +++ /dev/null @@ -1,684 +0,0 @@ -use super::{ - extensions::ReachabilityStoreIntervalExtensions, inquirer::get_next_chain_ancestor_unchecked, *, -}; -use crate::consensusdb::schemadb::ReachabilityStore; -use crate::dag::types::interval::Interval; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap}; -use std::collections::VecDeque; - -/// A struct used during reindex operations. It represents a temporary context -/// for caching subtree information during the *current* reindex operation only -pub(super) struct ReindexOperationContext<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - subtree_sizes: BlockHashMap<u64>, // Cache for subtree sizes computed during this operation - _depth: u64, - slack: u64, -} - -impl<'a, T: ReachabilityStore + ?Sized> ReindexOperationContext<'a, T> { - pub(super) fn new(store: &'a mut T, depth: u64, slack: u64) -> Self { - Self { - store, - subtree_sizes: BlockHashMap::new(), - _depth: depth, - slack, - } - } - - /// Traverses the reachability subtree that's defined by the new child - /// block and reallocates reachability interval space - /// such that another reindexing is unlikely to occur shortly - /// thereafter. It does this by traversing down the reachability - /// tree until it finds a block with an interval size that's greater than - /// its subtree size. See `propagate_interval` for further details. - pub(super) fn reindex_intervals(&mut self, new_child: Hash, reindex_root: Hash) -> Result<()> { - let mut current = new_child; - - // Search for the first ancestor with sufficient interval space - loop { - let current_interval = self.store.get_interval(current)?; - self.count_subtrees(current)?; - - // `current` has sufficient space, break and propagate - if current_interval.size() >= self.subtree_sizes[&current] { - break; - } - - let parent = self.store.get_parent(current)?; - - if parent.is_none() { - // If we ended up here it means that there are more - // than 2^64 blocks, which shouldn't ever happen. - return Err(ReachabilityError::DataOverflow( - "missing tree - parent during reindexing.
Theoretically, this - should only ever happen if there are more - than 2^64 blocks in the DAG." - .to_string(), - )); - } - - if current == reindex_root { - // Reindex root is expected to hold enough capacity as long as there are less - // than ~2^52 blocks in the DAG, which should never happen in our lifetimes - // even if block rate per second is above 100. The calculation follows from the allocation of - // 2^12 (which equals 2^64/2^52) for slack per chain block below the reindex root. - return Err(ReachabilityError::DataOverflow(format!( - "unexpected behavior: reindex root {reindex_root} is out of capacity during reindexing. - Theoretically, this should only ever happen if there are more than ~2^52 blocks in the DAG." - ))); - } - - if inquirer::is_strict_chain_ancestor_of(self.store, parent, reindex_root)? { - // In this case parent is guaranteed to have sufficient interval space, - // however we avoid reindexing the entire subtree above parent - // (which includes root and thus majority of blocks mined since) - // and use slacks along the chain up forward from parent to reindex root. - // Notes: - // 1. we set `required_allocation` = subtree size of current in order to double the - // current interval capacity - // 2. it might be the case that current is the `new_child` itself - return self.reindex_intervals_earlier_than_root( - current, - reindex_root, - parent, - self.subtree_sizes[&current], - ); - } - - current = parent - } - - self.propagate_interval(current) - } - - /// - /// Core (BFS) algorithms used during reindexing (see `count_subtrees` and `propagate_interval` below) - /// - - /// - /// count_subtrees counts the size of each subtree under this block, - /// and populates self.subtree_sizes with the results. - /// It is equivalent to the following recursive implementation: - /// - /// fn count_subtrees(&mut self, block: Hash) -> Result<u64> { - /// let mut subtree_size = 0u64; - /// for child in self.store.get_children(block)?.iter().cloned() { - /// subtree_size += self.count_subtrees(child)?; - /// } - /// self.subtree_sizes.insert(block, subtree_size + 1); - /// Ok(subtree_size + 1) - /// } - /// - /// However, we are expecting (linearly) deep trees, and so a - /// recursive stack-based approach is inefficient and will hit - /// recursion limits. Instead, the same logic was implemented - /// using a (queue-based) BFS method. At a high level, the - /// algorithm uses BFS for reaching all leaves and pushes - /// intermediate updates from leaves via parent chains until all - /// size information is gathered at the root of the operation - /// (i.e. at block). - fn count_subtrees(&mut self, block: Hash) -> Result<()> { - if self.subtree_sizes.contains_key(&block) { - return Ok(()); - } - - let mut queue = VecDeque::<Hash>::from([block]); - let mut counts = BlockHashMap::<u64>::new(); - - while let Some(mut current) = queue.pop_front() { - let children = self.store.get_children(current)?; - if children.is_empty() { - // We reached a leaf - self.subtree_sizes.insert(current, 1); - } else if !self.subtree_sizes.contains_key(&current) { - // We haven't yet calculated the subtree size of - // the current block. Add all its children to the - // queue - queue.extend(children.iter()); - continue; - } - - // We reached a leaf or a pre-calculated subtree.
- // Push information up - while current != block { - current = self.store.get_parent(current)?; - - let count = counts.entry(current).or_insert(0); - let children = self.store.get_children(current)?; - - *count = (*count).checked_add(1).unwrap(); - if *count < children.len() as u64 { - // Not all subtrees of the current block are ready - break; - } - - // All children of `current` have calculated their subtree size. - // Sum them all together and add 1 to get the subtree size of - // `current`. - let subtree_sum: u64 = children.iter().map(|c| self.subtree_sizes[c]).sum(); - self.subtree_sizes - .insert(current, subtree_sum.checked_add(1).unwrap()); - } - } - - Ok(()) - } - - /// Propagates a new interval using a BFS traversal. - /// Subtree intervals are recursively allocated according to subtree sizes and - /// the allocation rule in `Interval::split_exponential`. - fn propagate_interval(&mut self, block: Hash) -> Result<()> { - // Make sure subtrees are counted before propagating - self.count_subtrees(block)?; - - let mut queue = VecDeque::<Hash>::from([block]); - while let Some(current) = queue.pop_front() { - let children = self.store.get_children(current)?; - if !children.is_empty() { - let sizes: Vec<u64> = children.iter().map(|c| self.subtree_sizes[c]).collect(); - let interval = self.store.interval_children_capacity(current)?; - let intervals = interval.split_exponential(&sizes); - for (c, ci) in children.iter().copied().zip(intervals) { - self.store.set_interval(c, ci)?; - } - queue.extend(children.iter()); - } - } - Ok(()) - } - - /// This method implements the reindex algorithm for the case where the - /// new child node is not in reindex root's subtree. The function is expected to allocate - /// `required_allocation` to be added to interval of `allocation_block`. `common_ancestor` is - /// expected to be a direct parent of `allocation_block` and an ancestor of current `reindex_root`. - fn reindex_intervals_earlier_than_root( - &mut self, - allocation_block: Hash, - reindex_root: Hash, - common_ancestor: Hash, - required_allocation: u64, - ) -> Result<()> { - // The chosen child is: (i) child of `common_ancestor`; (ii) an - // ancestor of `reindex_root` or `reindex_root` itself - let chosen_child = - get_next_chain_ancestor_unchecked(self.store, reindex_root, common_ancestor)?; - let block_interval = self.store.get_interval(allocation_block)?; - let chosen_interval = self.store.get_interval(chosen_child)?; - - if block_interval.start < chosen_interval.start { - // `allocation_block` is in the subtree before the chosen child - self.reclaim_interval_before( - allocation_block, - common_ancestor, - chosen_child, - reindex_root, - required_allocation, - ) - } else { - // `allocation_block` is in the subtree after the chosen child - self.reclaim_interval_after( - allocation_block, - common_ancestor, - chosen_child, - reindex_root, - required_allocation, - ) - } - } - - fn reclaim_interval_before( - &mut self, - allocation_block: Hash, - common_ancestor: Hash, - chosen_child: Hash, - reindex_root: Hash, - required_allocation: u64, - ) -> Result<()> { - let mut slack_sum = 0u64; - let mut path_len = 0u64; - let mut path_slack_alloc = 0u64; - - let mut current = chosen_child; - // Walk up the chain from common ancestor's chosen child towards reindex root - loop { - if current == reindex_root { - // Reached reindex root.
In this case, since we reached (the unlimited) root, - // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation - .checked_add(self.slack.checked_mul(path_len).unwrap()) - .unwrap() - .checked_sub(slack_sum) - .unwrap(); - self.apply_interval_op_and_propagate(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - - // Set the slack for each chain block to be reserved below during the chain walk-down - path_slack_alloc = self.slack; - break; - } - - let slack_before_current = self.store.interval_remaining_before(current)?.size(); - slack_sum = slack_sum.checked_add(slack_before_current).unwrap(); - - if slack_sum >= required_allocation { - // Set offset to be just enough to satisfy required allocation - let offset = slack_before_current - .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) - .unwrap(); - self.apply_interval_op(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - - break; - } - - current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len = path_len.checked_add(1).unwrap(); - } - - // Go back down the reachability tree towards the common ancestor. - // On every hop we reindex the reachability subtree before the - // current block with an interval that is smaller. - // This is to make room for the required allocation. - loop { - current = self.store.get_parent(current)?; - if current == common_ancestor { - break; - } - - let slack_before_current = self.store.interval_remaining_before(current)?.size(); - let offset = slack_before_current.checked_sub(path_slack_alloc).unwrap(); - self.apply_interval_op(current, offset, Interval::increase_start)?; - self.offset_siblings_before(allocation_block, current, offset)?; - } - - Ok(()) - } - - fn reclaim_interval_after( - &mut self, - allocation_block: Hash, - common_ancestor: Hash, - chosen_child: Hash, - reindex_root: Hash, - required_allocation: u64, - ) -> Result<()> { - let mut slack_sum = 0u64; - let mut path_len = 0u64; - let mut path_slack_alloc = 0u64; - - let mut current = chosen_child; - // Walk up the chain from common ancestor's chosen child towards reindex root - loop { - if current == reindex_root { - // Reached reindex root. 
In this case, since we reached (the unlimited) root, - // we also re-allocate new slack for the chain we just traversed - let offset = required_allocation - .checked_add(self.slack.checked_mul(path_len).unwrap()) - .unwrap() - .checked_sub(slack_sum) - .unwrap(); - self.apply_interval_op_and_propagate(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - - // Set the slack for each chain block to be reserved below during the chain walk-down - path_slack_alloc = self.slack; - break; - } - - let slack_after_current = self.store.interval_remaining_after(current)?.size(); - slack_sum = slack_sum.checked_add(slack_after_current).unwrap(); - - if slack_sum >= required_allocation { - // Set offset to be just enough to satisfy required allocation - let offset = slack_after_current - .checked_sub(slack_sum.checked_sub(required_allocation).unwrap()) - .unwrap(); - self.apply_interval_op(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - - break; - } - - current = get_next_chain_ancestor_unchecked(self.store, reindex_root, current)?; - path_len = path_len.checked_add(1).unwrap(); - } - - // Go back down the reachability tree towards the common ancestor. - // On every hop we reindex the reachability subtree before the - // current block with an interval that is smaller. - // This is to make room for the required allocation. - loop { - current = self.store.get_parent(current)?; - if current == common_ancestor { - break; - } - - let slack_after_current = self.store.interval_remaining_after(current)?.size(); - let offset = slack_after_current.checked_sub(path_slack_alloc).unwrap(); - self.apply_interval_op(current, offset, Interval::decrease_end)?; - self.offset_siblings_after(allocation_block, current, offset)?; - } - - Ok(()) - } - - fn offset_siblings_before( - &mut self, - allocation_block: Hash, - current: Hash, - offset: u64, - ) -> Result<()> { - let parent = self.store.get_parent(current)?; - let children = self.store.get_children(parent)?; - - let (siblings_before, _) = split_children(&children, current)?; - for sibling in siblings_before.iter().cloned().rev() { - if sibling == allocation_block { - // We reached our final destination, allocate `offset` to `allocation_block` by increasing end and break - self.apply_interval_op_and_propagate( - allocation_block, - offset, - Interval::increase_end, - )?; - break; - } - // For non-`allocation_block` siblings offset the interval upwards in order to create space - self.apply_interval_op_and_propagate(sibling, offset, Interval::increase)?; - } - - Ok(()) - } - - fn offset_siblings_after( - &mut self, - allocation_block: Hash, - current: Hash, - offset: u64, - ) -> Result<()> { - let parent = self.store.get_parent(current)?; - let children = self.store.get_children(parent)?; - - let (_, siblings_after) = split_children(&children, current)?; - for sibling in siblings_after.iter().cloned() { - if sibling == allocation_block { - // We reached our final destination, allocate `offset` to `allocation_block` by decreasing only start and break - self.apply_interval_op_and_propagate( - allocation_block, - offset, - Interval::decrease_start, - )?; - break; - } - // For siblings before `allocation_block` offset the interval downwards to create space - self.apply_interval_op_and_propagate(sibling, offset, Interval::decrease)?; - } - - Ok(()) - } - - fn apply_interval_op( - &mut self, - block: Hash, - offset: u64, - op: fn(&Interval, u64) -> Interval, 
- ) -> Result<()> { - self.store - .set_interval(block, op(&self.store.get_interval(block)?, offset))?; - Ok(()) - } - - fn apply_interval_op_and_propagate( - &mut self, - block: Hash, - offset: u64, - op: fn(&Interval, u64) -> Interval, - ) -> Result<()> { - self.store - .set_interval(block, op(&self.store.get_interval(block)?, offset))?; - self.propagate_interval(block)?; - Ok(()) - } - - /// A method for handling reindex operations triggered by moving the reindex root - pub(super) fn concentrate_interval( - &mut self, - parent: Hash, - child: Hash, - is_final_reindex_root: bool, - ) -> Result<()> { - let children = self.store.get_children(parent)?; - - // Split the `children` of `parent` to siblings before `child` and siblings after `child` - let (siblings_before, siblings_after) = split_children(&children, child)?; - - let siblings_before_subtrees_sum: u64 = - self.tighten_intervals_before(parent, siblings_before)?; - let siblings_after_subtrees_sum: u64 = - self.tighten_intervals_after(parent, siblings_after)?; - - self.expand_interval_to_chosen( - parent, - child, - siblings_before_subtrees_sum, - siblings_after_subtrees_sum, - is_final_reindex_root, - )?; - - Ok(()) - } - - pub(super) fn tighten_intervals_before( - &mut self, - parent: Hash, - children_before: &[Hash], - ) -> Result<u64> { - let sizes = children_before - .iter() - .cloned() - .map(|block| { - self.count_subtrees(block)?; - Ok(self.subtree_sizes[&block]) - }) - .collect::<Result<Vec<u64>>>()?; - let sum = sizes.iter().sum(); - - let interval = self.store.get_interval(parent)?; - let interval_before = Interval::new( - interval.start.checked_add(self.slack).unwrap(), - interval - .start - .checked_add(self.slack) - .unwrap() - .checked_add(sum) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - - for (c, ci) in children_before - .iter() - .cloned() - .zip(interval_before.split_exact(sizes.as_slice())) - { - self.store.set_interval(c, ci)?; - self.propagate_interval(c)?; - } - - Ok(sum) - } - - pub(super) fn tighten_intervals_after( - &mut self, - parent: Hash, - children_after: &[Hash], - ) -> Result<u64> { - let sizes = children_after - .iter() - .cloned() - .map(|block| { - self.count_subtrees(block)?; - Ok(self.subtree_sizes[&block]) - }) - .collect::<Result<Vec<u64>>>()?; - let sum = sizes.iter().sum(); - - let interval = self.store.get_interval(parent)?; - let interval_after = Interval::new( - interval - .end - .checked_sub(self.slack) - .unwrap() - .checked_sub(sum) - .unwrap(), - interval - .end - .checked_sub(self.slack) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - - for (c, ci) in children_after - .iter() - .cloned() - .zip(interval_after.split_exact(sizes.as_slice())) - { - self.store.set_interval(c, ci)?; - self.propagate_interval(c)?; - } - - Ok(sum) - } - - pub(super) fn expand_interval_to_chosen( - &mut self, - parent: Hash, - child: Hash, - siblings_before_subtrees_sum: u64, - siblings_after_subtrees_sum: u64, - is_final_reindex_root: bool, - ) -> Result<()> { - let interval = self.store.get_interval(parent)?; - let allocation = Interval::new( - interval - .start - .checked_add(siblings_before_subtrees_sum) - .unwrap() - .checked_add(self.slack) - .unwrap(), - interval - .end - .checked_sub(siblings_after_subtrees_sum) - .unwrap() - .checked_sub(self.slack) - .unwrap() - .checked_sub(1) - .unwrap(), - ); - let current = self.store.get_interval(child)?; - - // Propagate interval only if the chosen `child` is the final reindex root AND - // the new interval doesn't contain the previous one - if 
is_final_reindex_root && !allocation.contains(current) { - /* - We deallocate slack on both sides as an optimization. Were we to - assign the fully allocated interval, the next time the reindex root moves we - would need to propagate intervals again. However when we do allocate slack, - next time this method is called (next time the reindex root moves), `allocation` is likely to contain `current`. - Note that below following the propagation we reassign the full `allocation` to `child`. - */ - let narrowed = Interval::new( - allocation.start.checked_add(self.slack).unwrap(), - allocation.end.checked_sub(self.slack).unwrap(), - ); - self.store.set_interval(child, narrowed)?; - self.propagate_interval(child)?; - } - - self.store.set_interval(child, allocation)?; - Ok(()) - } -} - -/// Splits `children` into two slices: the blocks that are before `pivot` and the blocks that are after. -fn split_children(children: &std::sync::Arc<Vec<Hash>>, pivot: Hash) -> Result<(&[Hash], &[Hash])> { - if let Some(index) = children.iter().cloned().position(|c| c == pivot) { - Ok(( - &children[..index], - &children[index.checked_add(1).unwrap()..], - )) - } else { - Err(ReachabilityError::DataInconsistency) - } -} - -#[cfg(test)] -mod tests { - use super::{super::tests::*, *}; - use crate::consensusdb::schemadb::{MemoryReachabilityStore, ReachabilityStoreReader}; - use crate::dag::types::interval::Interval; - use starcoin_types::blockhash; - - #[test] - fn test_count_subtrees() { - let mut store = MemoryReachabilityStore::new(); - - // Arrange - let root: Hash = 1.into(); - StoreBuilder::new(&mut store) - .add_block(root, Hash::new(blockhash::NONE)) - .add_block(2.into(), root) - .add_block(3.into(), 2.into()) - .add_block(4.into(), 2.into()) - .add_block(5.into(), 3.into()) - .add_block(6.into(), 5.into()) - .add_block(7.into(), 1.into()) - .add_block(8.into(), 6.into()); - - // Act - let mut ctx = ReindexOperationContext::new(&mut store, 10, 16); - ctx.count_subtrees(root).unwrap(); - - // Assert - let expected = [ - (1u64, 8u64), - (2, 6), - (3, 4), - (4, 1), - (5, 3), - (6, 2), - (7, 1), - (8, 1), - ] - .iter() - .cloned() - .map(|(h, c)| (Hash::from(h), c)) - .collect::<BlockHashMap<u64>>(); - - assert_eq!(expected, ctx.subtree_sizes); - - // Act - ctx.store.set_interval(root, Interval::new(1, 8)).unwrap(); - ctx.propagate_interval(root).unwrap(); - - // Assert intervals manually - let expected_intervals = [ - (1u64, (1u64, 8u64)), - (2, (1, 6)), - (3, (1, 4)), - (4, (5, 5)), - (5, (1, 3)), - (6, (1, 2)), - (7, (7, 7)), - (8, (1, 1)), - ]; - let actual_intervals = (1u64..=8) - .map(|i| (i, ctx.store.get_interval(i.into()).unwrap().into())) - .collect::<Vec<(u64, (u64, u64))>>(); - assert_eq!(actual_intervals, expected_intervals); - - // Assert intervals follow the general rules - store.validate_intervals(root).unwrap(); - } -} diff --git a/flexidag/src/dag/reachability/relations_service.rs b/flexidag/src/dag/reachability/relations_service.rs deleted file mode 100644 index 755cfb49be..0000000000 --- a/flexidag/src/dag/reachability/relations_service.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::consensusdb::{prelude::StoreError, schemadb::RelationsStoreReader}; -use parking_lot::RwLock; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashes; -use std::sync::Arc; -/// Multi-threaded block-relations service imp -#[derive(Clone)] -pub struct MTRelationsService<T: RelationsStoreReader> { - store: Arc<RwLock<Vec<T>>>, - level: usize, -} - -impl<T: RelationsStoreReader> 
MTRelationsService<T> { - pub fn new(store: Arc<RwLock<Vec<T>>>, level: u8) -> Self { - Self { - store, - level: level as usize, - } - } -} - -impl<T: RelationsStoreReader> RelationsStoreReader for MTRelationsService<T> { - fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.store.read()[self.level].get_parents(hash) - } - - fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.store.read()[self.level].get_children(hash) - } - - fn has(&self, hash: Hash) -> Result<bool, StoreError> { - self.store.read()[self.level].has(hash) - } -} diff --git a/flexidag/src/dag/reachability/tests.rs b/flexidag/src/dag/reachability/tests.rs deleted file mode 100644 index e9fa593c86..0000000000 --- a/flexidag/src/dag/reachability/tests.rs +++ /dev/null @@ -1,264 +0,0 @@ -//! -//! Test utils for reachability -//! -use super::{inquirer::*, tree::*}; -use crate::consensusdb::{ - prelude::StoreError, - schemadb::{ReachabilityStore, ReachabilityStoreReader}, -}; -use crate::dag::types::{interval::Interval, perf}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashExtensions, BlockHashMap, BlockHashSet}; -use std::collections::VecDeque; -use thiserror::Error; - -/// A struct with fluent API to streamline reachability store building -pub struct StoreBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, -} - -impl<'a, T: ReachabilityStore + ?Sized> StoreBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { store } - } - - pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { - let parent_height = if !parent.is_none() { - self.store.append_child(parent, hash).unwrap() - } else { - 0 - }; - self.store - .insert(hash, parent, Interval::empty(), parent_height + 1) - .unwrap(); - self - } -} - -/// A struct with fluent API to streamline tree building -pub struct TreeBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - reindex_depth: u64, - reindex_slack: u64, -} - -impl<'a, T: ReachabilityStore + ?Sized> TreeBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { - store, - reindex_depth: perf::DEFAULT_REINDEX_DEPTH, - reindex_slack: perf::DEFAULT_REINDEX_SLACK, - } - } - - pub fn new_with_params(store: &'a mut T, reindex_depth: u64, reindex_slack: u64) -> Self { - Self { - store, - reindex_depth, - reindex_slack, - } - } - - pub fn init(&mut self) -> &mut Self { - init(self.store).unwrap(); - self - } - - pub fn init_with_params(&mut self, origin: Hash, capacity: Interval) -> &mut Self { - init_with_params(self.store, origin, capacity).unwrap(); - self - } - - pub fn add_block(&mut self, hash: Hash, parent: Hash) -> &mut Self { - add_tree_block( - self.store, - hash, - parent, - self.reindex_depth, - self.reindex_slack, - ) - .unwrap(); - try_advancing_reindex_root(self.store, hash, self.reindex_depth, self.reindex_slack) - .unwrap(); - self - } - - pub fn store(&self) -> &&'a mut T { - &self.store - } -} - -#[derive(Clone)] -pub struct DagBlock { - pub hash: Hash, - pub parents: Vec<Hash>, -} - -impl DagBlock { - pub fn new(hash: Hash, parents: Vec<Hash>) -> Self { - Self { hash, parents } - } -} - -/// A struct with fluent API to streamline DAG building -pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized> { - store: &'a mut T, - map: BlockHashMap<DagBlock>, -} - -impl<'a, T: ReachabilityStore + ?Sized> DagBuilder<'a, T> { - pub fn new(store: &'a mut T) -> Self { - Self { - store, - map: BlockHashMap::new(), - } - } - - pub fn init(&mut self) -> &mut Self { - 
init(self.store).unwrap(); - self - } - - pub fn add_block(&mut self, block: DagBlock) -> &mut Self { - // Select by height (longest chain) just for the sake of internal isolated tests - let selected_parent = block - .parents - .iter() - .cloned() - .max_by_key(|p| self.store.get_height(*p).unwrap()) - .unwrap(); - let mergeset = self.mergeset(&block, selected_parent); - add_block( - self.store, - block.hash, - selected_parent, - &mut mergeset.iter().cloned(), - ) - .unwrap(); - hint_virtual_selected_parent(self.store, block.hash).unwrap(); - self.map.insert(block.hash, block); - self - } - - fn mergeset(&self, block: &DagBlock, selected_parent: Hash) -> Vec<Hash> { - let mut queue: VecDeque<Hash> = block - .parents - .iter() - .copied() - .filter(|p| *p != selected_parent) - .collect(); - let mut mergeset: BlockHashSet = queue.iter().copied().collect(); - let mut past = BlockHashSet::new(); - - while let Some(current) = queue.pop_front() { - for parent in self.map[&current].parents.iter() { - if mergeset.contains(parent) || past.contains(parent) { - continue; - } - - if is_dag_ancestor_of(self.store, *parent, selected_parent).unwrap() { - past.insert(*parent); - continue; - } - - mergeset.insert(*parent); - queue.push_back(*parent); - } - } - mergeset.into_iter().collect() - } - - pub fn store(&self) -> &&'a mut T { - &self.store - } -} - -#[derive(Error, Debug)] -pub enum TestError { - #[error("data store error")] - StoreError(#[from] StoreError), - - #[error("empty interval")] - EmptyInterval(Hash, Interval), - - #[error("sibling intervals are expected to be consecutive")] - NonConsecutiveSiblingIntervals(Interval, Interval), - - #[error("child interval out of parent bounds")] - IntervalOutOfParentBounds { - parent: Hash, - child: Hash, - parent_interval: Interval, - child_interval: Interval, - }, -} - -pub trait StoreValidationExtensions { - /// Checks if `block` is in the past of `other` (creates hashes from the u64 numbers) - fn in_past_of(&self, block: u64, other: u64) -> bool; - - /// Checks if `block` and `other` are in the anticone of each other - /// (creates hashes from the u64 numbers) - fn are_anticone(&self, block: u64, other: u64) -> bool; - - /// Validates that all tree intervals match the expected interval relations - fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError>; -} - -impl<T: ReachabilityStoreReader + ?Sized> StoreValidationExtensions for T { - fn in_past_of(&self, block: u64, other: u64) -> bool { - if block == other { - return false; - } - let res = is_dag_ancestor_of(self, block.into(), other.into()).unwrap(); - if res { - // Assert that the `future` relation is indeed asymmetric - assert!(!is_dag_ancestor_of(self, other.into(), block.into()).unwrap()) - } - res - } - - fn are_anticone(&self, block: u64, other: u64) -> bool { - !is_dag_ancestor_of(self, block.into(), other.into()).unwrap() - && !is_dag_ancestor_of(self, other.into(), block.into()).unwrap() - } - - fn validate_intervals(&self, root: Hash) -> std::result::Result<(), TestError> { - let mut queue = VecDeque::<Hash>::from([root]); - while let Some(parent) = queue.pop_front() { - let children = self.get_children(parent)?; - queue.extend(children.iter()); - - let parent_interval = self.get_interval(parent)?; - if parent_interval.is_empty() { - return Err(TestError::EmptyInterval(parent, parent_interval)); - } - - // Verify parent-child strict relation - for child in children.iter().cloned() { - let child_interval = self.get_interval(child)?; - if
!parent_interval.strictly_contains(child_interval) { - return Err(TestError::IntervalOutOfParentBounds { - parent, - child, - parent_interval, - child_interval, - }); - } - } - - // Iterate over consecutive siblings - for siblings in children.windows(2) { - let sibling_interval = self.get_interval(siblings[0])?; - let current_interval = self.get_interval(siblings[1])?; - if sibling_interval.end + 1 != current_interval.start { - return Err(TestError::NonConsecutiveSiblingIntervals( - sibling_interval, - current_interval, - )); - } - } - } - Ok(()) - } -} diff --git a/flexidag/src/dag/reachability/tree.rs b/flexidag/src/dag/reachability/tree.rs deleted file mode 100644 index a0d98a9b23..0000000000 --- a/flexidag/src/dag/reachability/tree.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! -//! Tree-related functions internal to the module -//! -use super::{ - extensions::ReachabilityStoreIntervalExtensions, inquirer::*, reindex::ReindexOperationContext, - *, -}; -use crate::consensusdb::schemadb::ReachabilityStore; -use starcoin_crypto::HashValue as Hash; - -/// Adds `new_block` as a child of `parent` in the tree structure. If this block -/// has no remaining interval to allocate, a reindexing is triggered. When a reindexing -/// is triggered, the reindex root point is used within the reindex algorithm's logic -pub fn add_tree_block( - store: &mut (impl ReachabilityStore + ?Sized), - new_block: Hash, - parent: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<()> { - // Get the remaining interval capacity - let remaining = store.interval_remaining_after(parent)?; - // Append the new child to `parent.children` - let parent_height = store.append_child(parent, new_block)?; - if remaining.is_empty() { - // Init with the empty interval. - // Note: internal logic relies on interval being this specific interval - // which comes exactly at the end of current capacity - store.insert( - new_block, - parent, - remaining, - parent_height.checked_add(1).unwrap(), - )?; - - // Start a reindex operation (TODO: add timing) - let reindex_root = store.get_reindex_root()?; - let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); - ctx.reindex_intervals(new_block, reindex_root)?; - } else { - let allocated = remaining.split_half().0; - store.insert( - new_block, - parent, - allocated, - parent_height.checked_add(1).unwrap(), - )?; - }; - Ok(()) -} - -/// Finds the most recent tree ancestor common to both `block` and the given `reindex root`. -/// Note that we assume that almost always the chain between the reindex root and the common -/// ancestor is longer than the chain between block and the common ancestor, hence we iterate -/// from `block`. -pub fn find_common_tree_ancestor( - store: &(impl ReachabilityStore + ?Sized), - block: Hash, - reindex_root: Hash, -) -> Result<Hash> { - let mut current = block; - loop { - if is_chain_ancestor_of(store, current, reindex_root)? { - return Ok(current); - } - current = store.get_parent(current)?; - } -} - -/// Finds a possible new reindex root, based on the `current` reindex root and the selected tip `hint` -pub fn find_next_reindex_root( - store: &(impl ReachabilityStore + ?Sized), - current: Hash, - hint: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<(Hash, Hash)> { - let mut ancestor = current; - let mut next = current; - - let hint_height = store.get_height(hint)?; - - // Test if current root is ancestor of selected tip (`hint`) - if not, this is a reorg case - if !is_chain_ancestor_of(store, current, hint)? 
{ - let current_height = store.get_height(current)?; - - // We have reindex root out of (hint) selected tip chain, however we switch chains only after a sufficient - // threshold of `reindex_slack` diff in order to address possible alternating reorg attacks. - // The `reindex_slack` constant is used as a heuristic large enough on the one hand, but - // one which will not harm performance on the other hand - given the available slack at the chain split point. - // - // Note: In some cases the height of the (hint) selected tip can be lower than the current reindex root height. - // If that's the case we keep the reindex root unchanged. - if hint_height < current_height - || hint_height.checked_sub(current_height).unwrap() < reindex_slack - { - return Ok((current, current)); - } - - let common = find_common_tree_ancestor(store, hint, current)?; - ancestor = common; - next = common; - } - - // Iterate from ancestor towards the selected tip (`hint`) until passing the - // `reindex_window` threshold, for finding the new reindex root - loop { - let child = get_next_chain_ancestor_unchecked(store, hint, next)?; - let child_height = store.get_height(child)?; - - if hint_height < child_height { - return Err(ReachabilityError::DataInconsistency); - } - if hint_height.checked_sub(child_height).unwrap() < reindex_depth { - break; - } - next = child; - } - - Ok((ancestor, next)) -} - -/// Attempts to advance or move the current reindex root according to the -/// provided `virtual selected parent` (`VSP`) hint. -/// It is important for the reindex root point to follow the consensus-agreed chain -/// since this way it can benefit from chain-robustness which is implied by the security -/// of the ordering protocol. That is, it benefits from the fact that all future blocks are -/// expected to elect the root subtree (by converging to the agreement to have it on the -/// selected chain).
See also the reachability algorithms overview (TODO) -pub fn try_advancing_reindex_root( - store: &mut (impl ReachabilityStore + ?Sized), - hint: Hash, - reindex_depth: u64, - reindex_slack: u64, -) -> Result<()> { - // Get current root from the store - let current = store.get_reindex_root()?; - - // Find the possible new root - let (mut ancestor, next) = - find_next_reindex_root(store, current, hint, reindex_depth, reindex_slack)?; - - // No update to root, return - if current == next { - return Ok(()); - } - - // if ancestor == next { - // trace!("next reindex root is an ancestor of current one, skipping concentration.") - // } - while ancestor != next { - let child = get_next_chain_ancestor_unchecked(store, next, ancestor)?; - let mut ctx = ReindexOperationContext::new(store, reindex_depth, reindex_slack); - ctx.concentrate_interval(ancestor, child, child == next)?; - ancestor = child; - } - - // Update reindex root in the data store - store.set_reindex_root(next)?; - Ok(()) -} diff --git a/flexidag/src/dag/types/ghostdata.rs b/flexidag/src/dag/types/ghostdata.rs deleted file mode 100644 index c680172148..0000000000 --- a/flexidag/src/dag/types/ghostdata.rs +++ /dev/null @@ -1,147 +0,0 @@ -use super::trusted::ExternalGhostdagData; -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashMap, BlockHashes, BlueWorkType, HashKTypeMap, KType}; -use std::sync::Arc; - -#[derive(Clone, Serialize, Deserialize, Default, Debug)] -pub struct GhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub selected_parent: Hash, - pub mergeset_blues: BlockHashes, - pub mergeset_reds: BlockHashes, - pub blues_anticone_sizes: HashKTypeMap, -} - -#[derive(Clone, Debug, Default, Serialize, Deserialize, Copy)] -pub struct CompactGhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub selected_parent: Hash, -} - -impl From<ExternalGhostdagData> for GhostdagData { - fn from(value: ExternalGhostdagData) -> Self { - Self { - blue_score: value.blue_score, - blue_work: value.blue_work, - selected_parent: value.selected_parent, - mergeset_blues: Arc::new(value.mergeset_blues), - mergeset_reds: Arc::new(value.mergeset_reds), - blues_anticone_sizes: Arc::new(value.blues_anticone_sizes), - } - } -} - -impl From<&GhostdagData> for ExternalGhostdagData { - fn from(value: &GhostdagData) -> Self { - Self { - blue_score: value.blue_score, - blue_work: value.blue_work, - selected_parent: value.selected_parent, - mergeset_blues: (*value.mergeset_blues).clone(), - mergeset_reds: (*value.mergeset_reds).clone(), - blues_anticone_sizes: (*value.blues_anticone_sizes).clone(), - } - } -} - -impl GhostdagData { - pub fn new( - blue_score: u64, - blue_work: BlueWorkType, - selected_parent: Hash, - mergeset_blues: BlockHashes, - mergeset_reds: BlockHashes, - blues_anticone_sizes: HashKTypeMap, - ) -> Self { - Self { - blue_score, - blue_work, - selected_parent, - mergeset_blues, - mergeset_reds, - blues_anticone_sizes, - } - } - - pub fn new_with_selected_parent(selected_parent: Hash, k: KType) -> Self { - let mut mergeset_blues: Vec<Hash> = Vec::with_capacity(k.checked_add(1).unwrap() as usize); - let mut blues_anticone_sizes: BlockHashMap<KType> = BlockHashMap::with_capacity(k as usize); - mergeset_blues.push(selected_parent); - blues_anticone_sizes.insert(selected_parent, 0); - - Self { - blue_score: Default::default(), - blue_work: Default::default(), - selected_parent, - mergeset_blues: BlockHashes::new(mergeset_blues), - 
mergeset_reds: Default::default(), - blues_anticone_sizes: HashKTypeMap::new(blues_anticone_sizes), - } - } - - pub fn mergeset_size(&self) -> usize { - self.mergeset_blues - .len() - .checked_add(self.mergeset_reds.len()) - .unwrap() - } - - /// Returns an iterator to the mergeset with no specified order (excluding the selected parent) - pub fn unordered_mergeset_without_selected_parent(&self) -> impl Iterator<Item = Hash> + '_ { - self.mergeset_blues - .iter() - .skip(1) // Skip the selected parent - .cloned() - .chain(self.mergeset_reds.iter().cloned()) - } - - /// Returns an iterator to the mergeset with no specified order (including the selected parent) - pub fn unordered_mergeset(&self) -> impl Iterator<Item = Hash> + '_ { - self.mergeset_blues - .iter() - .cloned() - .chain(self.mergeset_reds.iter().cloned()) - } - - pub fn to_compact(&self) -> CompactGhostdagData { - CompactGhostdagData { - blue_score: self.blue_score, - blue_work: self.blue_work, - selected_parent: self.selected_parent, - } - } - - pub fn add_blue( - &mut self, - block: Hash, - blue_anticone_size: KType, - block_blues_anticone_sizes: &BlockHashMap<KType>, - ) { - // Add the new blue block to mergeset blues - BlockHashes::make_mut(&mut self.mergeset_blues).push(block); - - // Get a mut ref to internal anticone size map - let blues_anticone_sizes = HashKTypeMap::make_mut(&mut self.blues_anticone_sizes); - - // Insert the new blue block with its blue anticone size to the map - blues_anticone_sizes.insert(block, blue_anticone_size); - - // Insert/update map entries for blocks affected by this insertion - for (blue, size) in block_blues_anticone_sizes { - blues_anticone_sizes.insert(*blue, size.checked_add(1).unwrap()); - } - } - - pub fn add_red(&mut self, block: Hash) { - // Add the new red block to mergeset reds - BlockHashes::make_mut(&mut self.mergeset_reds).push(block); - } - - pub fn finalize_score_and_work(&mut self, blue_score: u64, blue_work: BlueWorkType) { - self.blue_score = blue_score; - self.blue_work = blue_work; - } -} diff --git a/flexidag/src/dag/types/interval.rs b/flexidag/src/dag/types/interval.rs deleted file mode 100644 index 0b5cc4f6e5..0000000000 --- a/flexidag/src/dag/types/interval.rs +++ /dev/null @@ -1,377 +0,0 @@ -use serde::{Deserialize, Serialize}; -use std::fmt::{Display, Formatter}; - -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] -pub struct Interval { - pub start: u64, - pub end: u64, -} - -impl Display for Interval { - fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { - write!(f, "[{}, {}]", self.start, self.end) - } -} - -impl From<Interval> for (u64, u64) { - fn from(val: Interval) -> Self { - (val.start, val.end) - } -} - -impl Interval { - pub fn new(start: u64, end: u64) -> Self { - debug_assert!(start > 0 && end < u64::MAX && end >= start.checked_sub(1).unwrap()); // TODO: make sure this is actually debug-only - Interval { start, end } - } - - pub fn empty() -> Self { - Self::new(1, 0) - } - - /// Returns the maximally allowed `u64` interval. 
We leave a margin of 1 from - /// both `u64` bounds (`0` and `u64::MAX`) in order to support the reduction of any - /// legal interval to an empty one by setting `end = start - 1` or `start = end + 1` - pub fn maximal() -> Self { - Self::new(1, u64::MAX.saturating_sub(1)) - } - - pub fn size(&self) -> u64 { - // Empty intervals are indicated by `self.end == self.start - 1`, so - // we avoid the overflow by first adding 1 - // Note: this function will panic if `self.end < self.start - 1` due to overflow - (self.end.checked_add(1).unwrap()) - .checked_sub(self.start) - .unwrap() - } - - pub fn is_empty(&self) -> bool { - self.size() == 0 - } - - pub fn increase(&self, offset: u64) -> Self { - Self::new( - self.start.checked_add(offset).unwrap(), - self.end.checked_add(offset).unwrap(), - ) - } - - pub fn decrease(&self, offset: u64) -> Self { - Self::new( - self.start.checked_sub(offset).unwrap(), - self.end.checked_sub(offset).unwrap(), - ) - } - - pub fn increase_start(&self, offset: u64) -> Self { - Self::new(self.start.checked_add(offset).unwrap(), self.end) - } - - pub fn decrease_start(&self, offset: u64) -> Self { - Self::new(self.start.checked_sub(offset).unwrap(), self.end) - } - - pub fn increase_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end.checked_add(offset).unwrap()) - } - - pub fn decrease_end(&self, offset: u64) -> Self { - Self::new(self.start, self.end.checked_sub(offset).unwrap()) - } - - pub fn split_half(&self) -> (Self, Self) { - self.split_fraction(0.5) - } - - /// Splits this interval to two parts such that their - /// union is equal to the original interval and the first (left) part - /// contains the given fraction of the original interval's size. - /// Note: if the split results in fractional parts, this method rounds - /// the first part up and the last part down. - fn split_fraction(&self, fraction: f32) -> (Self, Self) { - let left_size = f32::ceil(self.size() as f32 * fraction) as u64; - - ( - Self::new( - self.start, - self.start - .checked_add(left_size) - .unwrap() - .checked_sub(1) - .unwrap(), - ), - Self::new(self.start.checked_add(left_size).unwrap(), self.end), - ) - } - - /// Splits this interval to exactly |sizes| parts where - /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly - /// equal to the interval's size. - pub fn split_exact(&self, sizes: &[u64]) -> Vec<Self> { - assert_eq!( - sizes.iter().sum::<u64>(), - self.size(), - "sum of sizes must be equal to the interval's size" - ); - let mut start = self.start; - sizes - .iter() - .map(|size| { - let interval = Self::new( - start, - start.checked_add(*size).unwrap().checked_sub(1).unwrap(), - ); - start = start.checked_add(*size).unwrap(); - interval - }) - .collect() - } - - /// Splits this interval to |sizes| parts - /// by the allocation rule described below. This method expects sum(sizes) - /// to be smaller or equal to the interval's size. Every part_i is - /// allocated at least sizes[i] capacity. The remaining budget is - /// split by an exponentially biased rule described below. - /// - /// This rule follows the GHOSTDAG protocol behavior where the child - /// with the largest subtree is expected to dominate the competition - /// for new blocks and thus grow the most. However, we may need to - /// add slack for non-largest subtrees in order to make CPU reindexing - /// attacks unworthy. 
- pub fn split_exponential(&self, sizes: &[u64]) -> Vec<Self> { - let interval_size = self.size(); - let sizes_sum = sizes.iter().sum::<u64>(); - assert!( - interval_size >= sizes_sum, - "interval's size must be greater than or equal to sum of sizes" - ); - assert!(sizes_sum > 0, "cannot split to 0 parts"); - if interval_size == sizes_sum { - return self.split_exact(sizes); - } - - // - // Add a fractional bias to every size in the provided sizes - // - - let mut remaining_bias = interval_size.checked_sub(sizes_sum).unwrap(); - let total_bias = remaining_bias as f64; - - let mut biased_sizes = Vec::<u64>::with_capacity(sizes.len()); - let exp_fractions = exponential_fractions(sizes); - for (i, fraction) in exp_fractions.iter().enumerate() { - let bias: u64 = if i == exp_fractions.len().checked_sub(1).unwrap() { - remaining_bias - } else { - remaining_bias.min(f64::round(total_bias * fraction) as u64) - }; - biased_sizes.push(sizes[i].checked_add(bias).unwrap()); - remaining_bias = remaining_bias.checked_sub(bias).unwrap(); - } - - self.split_exact(biased_sizes.as_slice()) - } - - pub fn contains(&self, other: Self) -> bool { - self.start <= other.start && other.end <= self.end - } - - pub fn strictly_contains(&self, other: Self) -> bool { - self.start <= other.start && other.end < self.end - } -} - -/// Returns a fraction for each size in sizes -/// as follows: -/// fraction[i] = 2^size[i] / sum_j(2^size[j]) -/// In the code below the above equation is divided by 2^max(size) -/// to avoid exploding numbers. Note that in 1 / 2^(max(size)-size[i]) -/// we divide 1 by potentially a very large number, which will -/// result in loss of float precision. This is not a problem - all -/// numbers close to 0 bear effectively the same weight. -fn exponential_fractions(sizes: &[u64]) -> Vec<f64> { - let max_size = sizes.iter().copied().max().unwrap_or_default(); - - let mut fractions = sizes - .iter() - .map(|s| 1f64 / 2f64.powf((max_size - s) as f64)) - .collect::<Vec<f64>>(); - - let fractions_sum = fractions.iter().sum::<f64>(); - for item in &mut fractions { - *item /= fractions_sum; - } - - fractions -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_interval_basics() { - let interval = Interval::new(101, 164); - let increased = interval.increase(10); - let decreased = increased.decrease(5); - // println!("{}", interval.clone()); - - assert_eq!(interval.start + 10, increased.start); - assert_eq!(interval.end + 10, increased.end); - - assert_eq!(interval.start + 5, decreased.start); - assert_eq!(interval.end + 5, decreased.end); - - assert_eq!(interval.size(), 64); - assert_eq!(Interval::maximal().size(), u64::MAX - 1); - assert_eq!(Interval::empty().size(), 0); - - let (empty_left, empty_right) = Interval::empty().split_half(); - assert_eq!(empty_left.size(), 0); - assert_eq!(empty_right.size(), 0); - - assert_eq!(interval.start + 10, interval.increase_start(10).start); - assert_eq!(interval.start - 10, interval.decrease_start(10).start); - assert_eq!(interval.end + 10, interval.increase_end(10).end); - assert_eq!(interval.end - 10, interval.decrease_end(10).end); - - assert_eq!(interval.end, interval.increase_start(10).end); - assert_eq!(interval.end, interval.decrease_start(10).end); - assert_eq!(interval.start, interval.increase_end(10).start); - assert_eq!(interval.start, interval.decrease_end(10).start); - - // println!("{:?}", Interval::maximal()); - // println!("{:?}", Interval::maximal().split_half()); - } - - #[test] - fn test_split_exact() { - let sizes = vec![5u64, 
10, 15, 20]; - let intervals = Interval::new(1, 50).split_exact(sizes.as_slice()); - assert_eq!(intervals.len(), sizes.len()); - for i in 0..sizes.len() { - assert_eq!(intervals[i].size(), sizes[i]) - } - } - - #[test] - fn test_exponential_fractions() { - let mut exp_fractions = exponential_fractions(vec![2, 4, 8, 16].as_slice()); - // println!("{:?}", exp_fractions); - for i in 0..exp_fractions.len() - 1 { - assert!(exp_fractions[i + 1] > exp_fractions[i]); - } - - exp_fractions = exponential_fractions(vec![].as_slice()); - assert_eq!(exp_fractions.len(), 0); - - exp_fractions = exponential_fractions(vec![0, 0].as_slice()); - assert_eq!(exp_fractions.len(), 2); - assert_eq!(0.5f64, exp_fractions[0]); - assert_eq!(exp_fractions[0], exp_fractions[1]); - } - - #[test] - fn test_contains() { - assert!(Interval::new(1, 100).contains(Interval::new(1, 100))); - assert!(Interval::new(1, 100).contains(Interval::new(1, 99))); - assert!(Interval::new(1, 100).contains(Interval::new(2, 100))); - assert!(Interval::new(1, 100).contains(Interval::new(2, 99))); - assert!(!Interval::new(1, 100).contains(Interval::new(50, 150))); - assert!(!Interval::new(1, 100).contains(Interval::new(150, 160))); - } - - #[test] - fn test_split_exponential() { - struct Test { - interval: Interval, - sizes: Vec<u64>, - expected: Vec<Interval>, - } - - let tests = [ - Test { - interval: Interval::new(1, 100), - sizes: vec![100u64], - expected: vec![Interval::new(1, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![50u64, 50], - expected: vec![Interval::new(1, 50), Interval::new(51, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![10u64, 20, 30, 40], - expected: vec![ - Interval::new(1, 10), - Interval::new(11, 30), - Interval::new(31, 60), - Interval::new(61, 100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![25u64, 25], - expected: vec![Interval::new(1, 50), Interval::new(51, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![1u64, 1], - expected: vec![Interval::new(1, 50), Interval::new(51, 100)], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![33u64, 33, 33], - expected: vec![ - Interval::new(1, 33), - Interval::new(34, 66), - Interval::new(67, 100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![10u64, 15, 25], - expected: vec![ - Interval::new(1, 10), - Interval::new(11, 25), - Interval::new(26, 100), - ], - }, - Test { - interval: Interval::new(1, 100), - sizes: vec![25u64, 15, 10], - expected: vec![ - Interval::new(1, 75), - Interval::new(76, 90), - Interval::new(91, 100), - ], - }, - Test { - interval: Interval::new(1, 10_000), - sizes: vec![10u64, 10, 20], - expected: vec![ - Interval::new(1, 20), - Interval::new(21, 40), - Interval::new(41, 10_000), - ], - }, - Test { - interval: Interval::new(1, 100_000), - sizes: vec![31_000u64, 31_000, 30_001], - expected: vec![ - Interval::new(1, 35_000), - Interval::new(35_001, 69_999), - Interval::new(70_000, 100_000), - ], - }, - ]; - - for test in &tests { - assert_eq!( - test.expected, - test.interval.split_exponential(test.sizes.as_slice()) - ); - } - } -} diff --git a/flexidag/src/dag/types/mod.rs b/flexidag/src/dag/types/mod.rs deleted file mode 100644 index d3acae1c23..0000000000 --- a/flexidag/src/dag/types/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod ghostdata; -pub mod interval; -pub mod ordering; -pub mod perf; -pub mod reachability; -pub mod trusted; diff --git a/flexidag/src/dag/types/ordering.rs b/flexidag/src/dag/types/ordering.rs deleted 
file mode 100644 index a1ed8c2561..0000000000 --- a/flexidag/src/dag/types/ordering.rs +++ /dev/null @@ -1,36 +0,0 @@ -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlueWorkType; -use std::cmp::Ordering; - -#[derive(Eq, Clone, Debug, Serialize, Deserialize)] -pub struct SortableBlock { - pub hash: Hash, - pub blue_work: BlueWorkType, -} - -impl SortableBlock { - pub fn new(hash: Hash, blue_work: BlueWorkType) -> Self { - Self { hash, blue_work } - } -} - -impl PartialEq for SortableBlock { - fn eq(&self, other: &Self) -> bool { - self.hash == other.hash - } -} - -impl PartialOrd for SortableBlock { - fn partial_cmp(&self, other: &Self) -> Option<Ordering> { - Some(self.cmp(other)) - } -} - -impl Ord for SortableBlock { - fn cmp(&self, other: &Self) -> Ordering { - self.blue_work - .cmp(&other.blue_work) - .then_with(|| self.hash.cmp(&other.hash)) - } -} diff --git a/flexidag/src/dag/types/perf.rs b/flexidag/src/dag/types/perf.rs deleted file mode 100644 index 6da44d4cd7..0000000000 --- a/flexidag/src/dag/types/perf.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! -//! A module for performance critical constants which depend on consensus parameters. -//! The constants in this module should all be revisited if mainnet consensus parameters change. -//! - -/// The default target depth for reachability reindexes. -pub const DEFAULT_REINDEX_DEPTH: u64 = 100; - -/// The default slack interval used by the reachability -/// algorithm to encounter for blocks out of the selected chain. -pub const DEFAULT_REINDEX_SLACK: u64 = 1 << 12; - -#[derive(Clone, Debug)] -pub struct PerfParams { - // - // Cache sizes - // - /// Preferred cache size for header-related data - pub header_data_cache_size: u64, - - /// Preferred cache size for block-body-related data which - /// is typically orders-of magnitude larger than header data - /// (Note this cannot be set to high due to severe memory consumption) - pub block_data_cache_size: u64, - - /// Preferred cache size for UTXO-related data - pub utxo_set_cache_size: u64, - - /// Preferred cache size for block-window-related data - pub block_window_cache_size: u64, - - // - // Thread-pools - // - /// Defaults to 0 which indicates using system default - /// which is typically the number of logical CPU cores - pub block_processors_num_threads: usize, - - /// Defaults to 0 which indicates using system default - /// which is typically the number of logical CPU cores - pub virtual_processor_num_threads: usize, -} - -pub const PERF_PARAMS: PerfParams = PerfParams { - header_data_cache_size: 10_000, - block_data_cache_size: 200, - utxo_set_cache_size: 10_000, - block_window_cache_size: 2000, - block_processors_num_threads: 0, - virtual_processor_num_threads: 0, -}; diff --git a/flexidag/src/dag/types/reachability.rs b/flexidag/src/dag/types/reachability.rs deleted file mode 100644 index 35dc3979b6..0000000000 --- a/flexidag/src/dag/types/reachability.rs +++ /dev/null @@ -1,26 +0,0 @@ -use super::interval::Interval; -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::BlockHashes; -use std::sync::Arc; - -#[derive(Clone, Default, Debug, Serialize, Deserialize)] -pub struct ReachabilityData { - pub children: BlockHashes, - pub parent: Hash, - pub interval: Interval, - pub height: u64, - pub future_covering_set: BlockHashes, -} - -impl ReachabilityData { - pub fn new(parent: Hash, interval: Interval, height: u64) -> Self { - Self { - children: Arc::new(vec![]), - 
parent, - interval, - height, - future_covering_set: Arc::new(vec![]), - } - } -} diff --git a/flexidag/src/dag/types/trusted.rs b/flexidag/src/dag/types/trusted.rs deleted file mode 100644 index 9a4cf37bbd..0000000000 --- a/flexidag/src/dag/types/trusted.rs +++ /dev/null @@ -1,26 +0,0 @@ -use serde::{Deserialize, Serialize}; -use starcoin_crypto::HashValue as Hash; -use starcoin_types::blockhash::{BlockHashMap, BlueWorkType, KType}; - -/// Represents semi-trusted externally provided Ghostdag data (by a network peer) -#[derive(Clone, Serialize, Deserialize)] -pub struct ExternalGhostdagData { - pub blue_score: u64, - pub blue_work: BlueWorkType, - pub selected_parent: Hash, - pub mergeset_blues: Vec<Hash>, - pub mergeset_reds: Vec<Hash>, - pub blues_anticone_sizes: BlockHashMap<KType>, -} - -/// Represents externally provided Ghostdag data associated with a block Hash -pub struct TrustedGhostdagData { - pub hash: Hash, - pub ghostdag: ExternalGhostdagData, -} - -impl TrustedGhostdagData { - pub fn new(hash: Hash, ghostdag: ExternalGhostdagData) -> Self { - Self { hash, ghostdag } - } -} diff --git a/flexidag/src/flexidag_service.rs b/flexidag/src/flexidag_service.rs new file mode 100644 index 0000000000..63ceb2431b --- /dev/null +++ b/flexidag/src/flexidag_service.rs @@ -0,0 +1,568 @@ +use std::{ + collections::{BTreeSet, BinaryHeap}, + sync::Arc, +}; + +use anyhow::{anyhow, bail, Error, Ok, Result}; +use starcoin_accumulator::{accumulator_info::AccumulatorInfo, Accumulator, MerkleAccumulator, node::AccumulatorStoreType}; +use starcoin_config::{NodeConfig, TimeService}; +use starcoin_consensus::{dag::types::ghostdata::GhostdagData, BlockDAG}; +use starcoin_crypto::HashValue; +use starcoin_service_registry::{ + ActorService, ServiceContext, ServiceFactory, ServiceHandler, ServiceRequest, +}; +use starcoin_storage::{ + flexi_dag::{SyncFlexiDagSnapshot, SyncFlexiDagSnapshotHasher}, + storage::CodecKVStore, + BlockStore, Storage, SyncFlexiDagStore, block_info::BlockInfoStore, Store, +}; +use starcoin_types::{block::BlockHeader, startup_info, dag_block::KTotalDifficulty}; + +#[derive(Debug, Clone)] +pub struct DumpTipsToAccumulator { + pub block_header: BlockHeader, + pub current_head_block_id: HashValue, + pub k_total_difficulty: KTotalDifficulty, +} + +impl ServiceRequest for DumpTipsToAccumulator { + type Response = anyhow::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct UpdateDagTips { + pub block_header: BlockHeader, + pub current_head_block_id: HashValue, + pub k_total_difficulty: KTotalDifficulty, +} + +impl ServiceRequest for UpdateDagTips { + type Response = anyhow::Result<()>; +} + +#[derive(Debug, Clone)] +pub struct GetDagTips; + +impl ServiceRequest for GetDagTips { + type Response = anyhow::Result<Option<Vec<HashValue>>>; +} + +#[derive(Debug, Clone)] +pub struct GetDagAccumulatorInfo; + +impl ServiceRequest for GetDagAccumulatorInfo { + type Response = anyhow::Result<Option<AccumulatorInfo>>; +} + +#[derive(Debug, Clone)] +pub struct GetDagAccumulatorLeafDetail { + pub leaf_index: u64, + pub batch_size: u64, +} + +#[derive(Debug, Clone)] +pub struct DagAccumulatorLeafDetail { + pub accumulator_root: HashValue, + pub tips: Vec<HashValue>, +} + +impl ServiceRequest for GetDagAccumulatorLeafDetail { + type Response = anyhow::Result<Vec<DagAccumulatorLeafDetail>>; +} + +#[derive(Debug, Clone)] +pub struct GetDagBlockParents { + pub block_id: HashValue, +} + +#[derive(Debug, Clone)] +pub struct DagBlockParents { + pub parents: Vec<HashValue>, +} + +impl ServiceRequest for 
GetDagBlockParents { + type Response = anyhow::Result<DagBlockParents>; +} + +#[derive(Debug, Clone)] +pub struct GetDagAccumulatorLeaves { + pub leaf_index: u64, + pub batch_size: u64, + pub reverse: bool, +} + +#[derive(Debug, Clone)] +pub struct DagAccumulatorLeaf { + pub leaf_index: u64, + pub dag_accumulator_root: HashValue, +} + +impl ServiceRequest for GetDagAccumulatorLeaves { + type Response = anyhow::Result<Vec<DagAccumulatorLeaf>>; +} + +#[derive(Debug, Clone)] +pub struct AddToDag { + pub block_header: BlockHeader, +} + +#[derive(Debug, Clone)] +pub struct MergesetBlues { + pub selected_parent: HashValue, + pub mergeset_blues: Vec<HashValue>, +} + +impl ServiceRequest for AddToDag { + type Response = anyhow::Result<MergesetBlues>; +} + +#[derive(Debug, Clone)] +pub struct ForkDagAccumulator { + pub new_blocks: Vec<HashValue>, + pub dag_accumulator_index: u64, + pub block_header_id: HashValue, +} + +impl ServiceRequest for ForkDagAccumulator { + type Response = anyhow::Result<AccumulatorInfo>; +} + +#[derive(Debug, Clone)] +pub struct FinishSync { + pub dag_accumulator_info: AccumulatorInfo, +} + +impl ServiceRequest for FinishSync { + type Response = anyhow::Result<()>; +} + +pub struct TipInfo { + // `Some` once the chain runs as a dag; `None` while it is still the legacy single chain + tips: Option<Vec<HashValue>>, + k_total_difficulties: BTreeSet<KTotalDifficulty>, +} + +pub struct FlexidagService { + dag: Option<BlockDAG>, + dag_accumulator: Option<MerkleAccumulator>, + tip_info: Option<TipInfo>, + storage: Arc<Storage>, +} + +impl FlexidagService { + pub fn add_to_dag(&mut self, header: BlockHeader) -> Result<Arc<GhostdagData>> { + let dag = match &mut self.dag { + Some(dag) => dag, + None => bail!("dag is none"), + }; + match dag.get_ghostdag_data_by_child(header.id()) { + std::result::Result::Ok(ghost_dag_data) => Ok(ghost_dag_data), + Err(_) => { + dag.commit(header.clone())?; + dag.get_ghostdag_data_by_child(header.id()) + } + } + } + + fn create_snapshot_by_tips(tips: Vec<HashValue>, head_block_id: HashValue, storage: Arc<Storage>) -> Result<(HashValue, SyncFlexiDagSnapshotHasher)> { + let mut k_total_difficulties = BTreeSet::new(); + tips.iter().try_for_each(|block_id| { + k_total_difficulties.insert(KTotalDifficulty { + head_block_id: block_id.clone(), + total_difficulty: storage.get_block_info(block_id.clone()).expect("failed to read block info").ok_or_else(|| anyhow!("block info should not be none"))?.total_difficulty, + }); + Ok(()) + })?; + + let snapshot_hasher = SyncFlexiDagSnapshotHasher { + child_hashes: tips, + head_block_id, + k_total_difficulties, + }; + + Ok((BlockDAG::calculate_dag_accumulator_key(&snapshot_hasher)?, snapshot_hasher)) + } + + fn merge_from_big_dag(&mut self, msg: ForkDagAccumulator) -> Result<AccumulatorInfo> { + let dag_accumulator = self.dag_accumulator.as_mut().ok_or_else(|| anyhow!("the dag accumulator should not be none"))?; + if dag_accumulator.num_leaves() != msg.dag_accumulator_index { + bail!("cannot merge dag accumulator since its leaf count does not match the incoming dag_accumulator_index"); + } + let tip_info = self.tip_info.as_mut().ok_or_else(|| anyhow!("the tips should not be none"))?; + msg.new_blocks.iter().for_each(|block_id| { + if !tip_info.tips.as_ref().expect("tips should not be none").contains(block_id) { + tip_info.tips.as_mut().expect("tips should not be none").push(block_id.clone()); + } + }); + + let (key, snapshot_hasher) = Self::create_snapshot_by_tips(tip_info.tips.as_ref().expect("tips should not be none").clone(), msg.block_header_id,
self.storage.clone())?; + dag_accumulator.append(&vec![key])?; + let dag_accumulator_info = dag_accumulator.get_info(); + self.storage.get_accumulator_snapshot_storage().put(key, snapshot_hasher.to_snapshot(dag_accumulator_info.clone()))?; + dag_accumulator.flush()?; + Ok(dag_accumulator_info) + } + + fn merge_from_small_dag(&mut self, msg: ForkDagAccumulator) -> Result<AccumulatorInfo> { + let dag_accumulator = self + .dag_accumulator + .as_mut() + .ok_or_else(|| anyhow!("dag accumulator is none"))?; + // fetch the snapshot keys at and just before the requested dag accumulator index + let previous_key = dag_accumulator.get_leaf(msg.dag_accumulator_index - 1)? + .ok_or_else(|| anyhow!("the dag snapshot hash is none"))?; + + let current_key = dag_accumulator.get_leaf(msg.dag_accumulator_index)? + .ok_or_else(|| anyhow!("the dag snapshot hash is none"))?; + + let pre_snapshot = self + .storage + .get_accumulator_snapshot_storage() + .get(previous_key)? + .ok_or_else(|| anyhow!("the dag snapshot is none"))?; + + let current_snapshot = self + .storage + .get_accumulator_snapshot_storage() + .get(current_key)? + .ok_or_else(|| anyhow!("the dag snapshot is none"))?; + + // fork the dag accumulator according to the ForkDagAccumulator.dag_accumulator_index + let fork = dag_accumulator.fork(Some(pre_snapshot.accumulator_info)); + + let mut new_blocks = msg.new_blocks; + current_snapshot.child_hashes.iter().for_each(|block_id| { + if !new_blocks.contains(block_id) { + new_blocks.push(block_id.clone()); + } + }); + + let (key, snapshot_hasher) = Self::create_snapshot_by_tips(new_blocks, msg.block_header_id, self.storage.clone())?; + fork.append(&vec![key])?; + let dag_accumulator_info = fork.get_info(); + self.storage.get_accumulator_snapshot_storage().put(key, snapshot_hasher.to_snapshot(dag_accumulator_info.clone()))?; + fork.flush()?; + Ok(dag_accumulator_info) + } +} + +impl ServiceFactory<Self> for FlexidagService { + fn create(ctx: &mut ServiceContext<FlexidagService>) -> Result<Self> { + let storage = ctx.get_shared::<Arc<Storage>>()?; + let config = ctx.get_shared::<Arc<NodeConfig>>()?; + let (dag, dag_accumulator) = + BlockDAG::try_init_with_storage(storage.clone(), config.clone())?; + let tip_info = dag_accumulator.as_ref().map(|accumulator| { + // num_leaves() is a count, so the newest leaf sits at num_leaves() - 1 + let tips_index = accumulator.num_leaves().saturating_sub(1); + let tips_key = accumulator + .get_leaf(tips_index) + .expect("failed to read the dag snapshot hash") + .expect("the dag snapshot hash is none"); + let snapshot = storage + .get_accumulator_snapshot_storage() + .get(tips_key) + .expect("failed to read the snapshot object") + .expect("dag snapshot object is none"); + TipInfo { + tips: Some(snapshot.child_hashes), + k_total_difficulties: snapshot.k_total_difficulties, + } + }); + Ok(Self { + dag, + dag_accumulator, + tip_info, + storage: storage.clone(), + }) + } +} + +impl ActorService for FlexidagService { + fn started(&mut self, ctx: &mut ServiceContext<Self>) -> Result<()> { + // ctx.subscribe::<NewHeadBlock>(); + Ok(()) + } + + fn stopped(&mut self, ctx: &mut ServiceContext<Self>) -> Result<()> { + // ctx.unsubscribe::<NewHeadBlock>(); + Ok(()) + } +} + +// Send this message after a new block has been minted, the block committed, +// and the startup info updated. +impl ServiceHandler<Self, DumpTipsToAccumulator> for FlexidagService { + fn handle( + &mut self, + msg: DumpTipsToAccumulator, + ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<()> { + let storage = ctx.get_shared::<Arc<Storage>>()?; + if self.tip_info.is_none() { + let config =
ctx.get_shared::<Arc<NodeConfig>>()?; + let (dag, dag_accumulator) = BlockDAG::try_init_with_storage(storage.clone(), config)?; + if dag.is_none() { + Ok(()) // the chain is still a single chain + } else { + // initialize the dag data; the chain becomes a dag chain from the next block on + self.dag = dag; + self.dag_accumulator = dag_accumulator; + self.tip_info = Some(TipInfo { + tips: Some(vec![msg.block_header.id()]), + k_total_difficulties: [msg.k_total_difficulty].into_iter().collect(), + }); + self.storage = storage.clone(); + Ok(()) + } + } else { + // the chain has already become the flexidag chain + let tip_info = self + .tip_info + .take() + .expect("the tips should not be none in this branch"); + let snapshot_hasher = SyncFlexiDagSnapshotHasher { + child_hashes: tip_info.tips.expect("the tips should not be none"), + head_block_id: msg.current_head_block_id, + k_total_difficulties: tip_info.k_total_difficulties, + }; + let key = BlockDAG::calculate_dag_accumulator_key(&snapshot_hasher)?; + let dag_accumulator = self + .dag_accumulator + .as_mut() + .expect("the tips are set but the dag accumulator is none"); + dag_accumulator.append(&vec![key])?; + storage.get_accumulator_snapshot_storage().put( + key, + snapshot_hasher.to_snapshot(dag_accumulator.get_info()), + )?; + dag_accumulator.flush()?; + self.tip_info = Some(TipInfo { + tips: Some(vec![msg.block_header.id()]), + k_total_difficulties: [msg.k_total_difficulty].into_iter().collect(), + }); + self.storage = storage.clone(); + Ok(()) + } + } +} + +impl ServiceHandler<Self, UpdateDagTips> for FlexidagService { + fn handle( + &mut self, + msg: UpdateDagTips, + ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<()> { + let header = msg.block_header; + match &mut self.tip_info { + Some(tip_info) => { + if !tip_info.tips.as_ref().expect("tips should not be none").contains(&header.id()) { + tip_info.tips.as_mut().expect("tips should not be none").push(header.id()); + tip_info.k_total_difficulties.insert(KTotalDifficulty { + head_block_id: msg.k_total_difficulty.head_block_id, + total_difficulty: msg.k_total_difficulty.total_difficulty, + }); + } + Ok(()) + } + None => { + let storage = ctx.get_shared::<Arc<Storage>>()?; + let config = ctx.get_shared::<Arc<NodeConfig>>()?; + if header.number() == storage.dag_fork_height(config.net().id().clone()) { + let (dag, dag_accumulator) = + BlockDAG::try_init_with_storage(storage.clone(), config)?; + if dag.is_none() { + Ok(()) // the chain is still a single chain + } else { + // initialize the dag data; the chain becomes a dag chain from the next block on + self.dag = dag; + self.tip_info = Some(TipInfo { + tips: Some(vec![header.id()]), + k_total_difficulties: [msg.k_total_difficulty] + .into_iter() + .collect(), + }); + self.dag_accumulator = dag_accumulator; + + storage + .get_startup_info()?
+ .map(|mut startup_info| { + startup_info.dag_main = Some(header.id()); + storage.save_startup_info(startup_info) + }) + .expect("startup info should not be none") + } + } else { + Ok(()) // drop the block, the chain is still a single chain + } + } + } + } +} + +impl ServiceHandler<Self, GetDagTips> for FlexidagService { + fn handle( + &mut self, + _msg: GetDagTips, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<Option<Vec<HashValue>>> { + Ok(self.tip_info.as_ref().ok_or_else(|| anyhow!("tip info is none"))?.tips.clone()) + } +} + +impl ServiceHandler<Self, GetDagAccumulatorInfo> for FlexidagService { + fn handle( + &mut self, + _msg: GetDagAccumulatorInfo, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<Option<AccumulatorInfo>> { + Ok(self + .dag_accumulator + .as_ref() + .map(|dag_accumulator| dag_accumulator.get_info())) + } +} + +impl ServiceHandler<Self, GetDagAccumulatorLeaves> for FlexidagService { + fn handle( + &mut self, + msg: GetDagAccumulatorLeaves, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<Vec<DagAccumulatorLeaf>> { + match &self.dag_accumulator { + Some(dag_accumulator) => { + let end_index = std::cmp::min( + msg.leaf_index + msg.batch_size - 1, + dag_accumulator.num_leaves() - 1, + ); + let mut result = vec![]; + for index in msg.leaf_index..=end_index { + // when reverse is requested, walk the window back to front + let real_index = if msg.reverse { + end_index - (index - msg.leaf_index) + } else { + index + }; + let key = dag_accumulator + .get_leaf(real_index)? + .ok_or_else(|| anyhow!("the dag snapshot hash is none"))?; + let snapshot = self + .storage + .get_accumulator_snapshot_storage() + .get(key)? + .expect("the snapshot should not be none"); + result.push(DagAccumulatorLeaf { + leaf_index: real_index, + dag_accumulator_root: snapshot.accumulator_info.accumulator_root, + }); + } + Ok(result) + } + None => bail!("dag accumulator is none"), + } + } +} + +impl ServiceHandler<Self, GetDagBlockParents> for FlexidagService { + fn handle( + &mut self, + msg: GetDagBlockParents, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<DagBlockParents> { + match &self.dag { + Some(dag) => Ok(DagBlockParents { + parents: dag.get_parents(msg.block_id)?, + }), + None => bail!("dag is none"), + } + } +} + +impl ServiceHandler<Self, GetDagAccumulatorLeafDetail> for FlexidagService { + fn handle( + &mut self, + msg: GetDagAccumulatorLeafDetail, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<Vec<DagAccumulatorLeafDetail>> { + match &self.dag_accumulator { + Some(dag_accumulator) => { + let end_index = std::cmp::min( + msg.leaf_index + msg.batch_size - 1, + dag_accumulator.num_leaves() - 1, + ); + let mut details = vec![]; + let snapshot_storage = self.storage.get_accumulator_snapshot_storage(); + for index in msg.leaf_index..=end_index { + let key = dag_accumulator + .get_leaf(index)? + .ok_or_else(|| anyhow!("the dag snapshot hash is none"))?; + let snapshot = snapshot_storage + .get(key)?
+ .ok_or_else(|| anyhow!("the dag snapshot is none"))?; + details.push(DagAccumulatorLeafDetail { + accumulator_root: snapshot.accumulator_info.accumulator_root, + tips: snapshot.child_hashes, + }); + } + Ok(details) + } + None => bail!("dag accumulator is none"), + } + } +} + +impl ServiceHandler<Self, AddToDag> for FlexidagService { + fn handle( + &mut self, + msg: AddToDag, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<MergesetBlues> { + let ghost_dag_data = self.add_to_dag(msg.block_header)?; + Ok(MergesetBlues { + selected_parent: ghost_dag_data.selected_parent, + mergeset_blues: ghost_dag_data.mergeset_blues.as_ref().clone(), + }) + } +} + +impl ServiceHandler<Self, ForkDagAccumulator> for FlexidagService { + fn handle( + &mut self, + msg: ForkDagAccumulator, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<AccumulatorInfo> { + let dag_accumulator = self + .dag_accumulator + .as_ref() + .ok_or_else(|| anyhow!("dag accumulator is none"))?; + + if msg.dag_accumulator_index > dag_accumulator.num_leaves() { + self.merge_from_big_dag(msg) + } else { + self.merge_from_small_dag(msg) + } + } +} + +impl ServiceHandler<Self, FinishSync> for FlexidagService { + fn handle( + &mut self, + msg: FinishSync, + _ctx: &mut ServiceContext<FlexidagService>, + ) -> Result<()> { + let dag_accumulator = self.dag_accumulator.as_mut().ok_or_else(|| anyhow!("the dag_accumulator is none when sync finishes"))?; + let local_info = dag_accumulator.get_info(); + if msg.dag_accumulator_info.get_num_leaves() < local_info.get_num_leaves() { + let start_index = msg.dag_accumulator_info.get_num_leaves(); + let new_dag_accumulator = MerkleAccumulator::new_with_info(msg.dag_accumulator_info, self.storage.get_accumulator_store(AccumulatorStoreType::SyncDag)); + for index in start_index..local_info.get_num_leaves() { + let key = dag_accumulator.get_leaf(index)?.ok_or_else(|| anyhow!("the dag_accumulator leaf is none when sync finishes"))?; + new_dag_accumulator.append(&[key])?; + } + self.dag_accumulator = Some(new_dag_accumulator); + Ok(()) + } else { + self.dag_accumulator = Some(MerkleAccumulator::new_with_info(msg.dag_accumulator_info, self.storage.get_accumulator_store(AccumulatorStoreType::SyncDag))); + Ok(()) + } + } +} \ No newline at end of file diff --git a/flexidag/src/lib.rs b/flexidag/src/lib.rs index fd332ebcee..66689bb05f 100644 --- a/flexidag/src/lib.rs +++ b/flexidag/src/lib.rs @@ -1,11 +1,17 @@ -mod blockdag; -pub mod consensusdb; -pub mod dag; -pub use blockdag::BlockDAG; -pub use consensusdb::consensus_relations::{ - DbRelationsStore, RelationsStore, RelationsStoreReader, -}; -pub use consensusdb::prelude::{FlexiDagStorage, FlexiDagStorageConfig, StoreError}; -pub use consensusdb::schema; pub mod flexidag_service; pub use flexidag_service::FlexidagService; + +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 2cbd91a85a..b1c2b2641f 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -270,7 +270,7 @@ where let id = new_block.header().id(); debug!("try connect mined block: {}", id); - match self.chain_service.try_connect(block) { + match self.chain_service.try_connect(new_block.as_ref().clone()) {
std::result::Result::Ok(ConnectOk::DagConnected) => { match self.chain_service.dump_tips(block_header) { std::result::Result::Ok(_) => (), diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 7f0e0cdaba..1eeb5138df 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -417,9 +417,9 @@ where dag_accumulator_index: start_index, block_header_id: self.chain.head_block().id(), }))??); - if state == State::Enough { + if state == CollectorState::Enough { async_std::task::block_on(self.flexidag_service.send(FinishSync { - dag_accumulator_info: self.new_dag_accumulator_info.clone(), + dag_accumulator_info: self.new_dag_accumulator_info.clone().expect("dag accumulator info should exist"), }))?? } return Ok(state);
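For reviewers, a minimal client-side sketch of how the new FlexidagService requests are meant to be driven. It is not part of the patch: the crate path `starcoin_flexidag`, the `flexidag` handle, and the helper name are illustrative assumptions; only the message types and the double-`?` pattern (outer mailbox error, inner handler `anyhow::Result`) come from the code above.

use anyhow::Result;
use starcoin_crypto::HashValue;
use starcoin_flexidag::flexidag_service::{AddToDag, GetDagTips};
use starcoin_flexidag::FlexidagService;
use starcoin_service_registry::ServiceRef;
use starcoin_types::block::BlockHeader;

// Hypothetical helper: commit a header to the dag and read back the tips.
async fn add_block_and_read_tips(
    flexidag: ServiceRef<FlexidagService>,
    header: BlockHeader,
) -> Result<Option<Vec<HashValue>>> {
    // AddToDag commits the header if the dag does not know it yet and
    // answers with the selected parent plus the mergeset blues.
    let mergeset = flexidag.send(AddToDag { block_header: header }).await??;
    let _selected_parent = mergeset.selected_parent;

    // GetDagTips answers None while the chain still runs as a single chain.
    Ok(flexidag.send(GetDagTips).await??)
}

The same pattern applies to DumpTipsToAccumulator and UpdateDagTips, which, per the handler comments, should only be sent once the minted block has been committed and the startup info updated.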
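One more note on the interval-splitting rule deleted above: `split_exponential` grants every part its declared size and then shares the spare capacity in proportion to 2^size, so the largest subtree absorbs nearly all of it. A sketch of the effect, assuming the `Interval` type keeps this API wherever it now lives after the restructuring; the numbers restate the expectations of the deleted `test_split_exponential`:

// Path is an assumption; the dag types appear to move under starcoin_consensus.
use starcoin_consensus::dag::types::interval::Interval;

fn demo_split_exponential() {
    // With sizes [10, 10, 20] inside [1, 10_000], the spare 9_960 units are
    // weighted by 2^size, so almost everything goes to the third part.
    let parts = Interval::new(1, 10_000).split_exponential(&[10, 10, 20]);
    assert_eq!(
        parts,
        vec![
            Interval::new(1, 20),
            Interval::new(21, 40),
            Interval::new(41, 10_000),
        ]
    );
}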