From 0cce59e05e19f6a154434676c850b24b13b47c36 Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Mon, 6 Jan 2025 12:20:40 +0200 Subject: [PATCH 1/8] feat: state sync optimization (#346) * feat: optimization refactor * fix: display non-utf8 path * remove check on height * refactor: various refactoring and comments * refactor: suggestions and richer doc * refactor: better type alias * fix: build for verify feature * doc: added documentation for add_subtree_sync_info * refactor: applied suggestions --- grovedb/src/lib.rs | 148 ++- grovedb/src/replication.rs | 941 +++++++----------- grovedb/src/replication/state_sync_session.rs | 618 ++++++++++++ storage/src/rocksdb_storage/storage.rs | 30 +- storage/src/storage.rs | 27 + tutorials/Cargo.toml | 1 + tutorials/src/bin/replication.rs | 36 +- 7 files changed, 1188 insertions(+), 613 deletions(-) create mode 100644 grovedb/src/replication/state_sync_session.rs diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 57f68d33..f3b2dcc4 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -240,6 +240,25 @@ pub type Transaction<'db> = >::Transaction; #[cfg(feature = "full")] pub type TransactionArg<'db, 'a> = Option<&'a Transaction<'db>>; +/// Type alias for the return type of the `verify_merk_and_submerks` and +/// `verify_grovedb` functions. It represents a mapping of paths (as vectors of +/// vectors of bytes) to a tuple of three cryptographic hashes: the root hash, +/// the combined value hash, and the expected value hash. +#[cfg(feature = "full")] +type VerificationIssues = HashMap>, (CryptoHash, CryptoHash, CryptoHash)>; + +/// Type alias for the return type of the `open_merk_for_replication` function. +/// It represents a tuple containing: +/// - A `Merk` instance with a prefixed RocksDB immediate storage context. +/// - An optional `root_key`, represented as a vector of bytes. +/// - A boolean indicating whether the Merk is a sum tree. 
+#[cfg(feature = "full")] +type OpenedMerkForReplication<'tx> = ( + Merk>, + Option>, + bool, +); + #[cfg(feature = "full")] impl GroveDb { /// Opens a given path @@ -330,6 +349,46 @@ impl GroveDb { } } + fn open_transactional_merk_by_prefix<'db>( + &'db self, + prefix: SubtreePrefix, + root_key: Option>, + is_sum_tree: bool, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, + ) -> CostResult>, Error> { + let mut cost = OperationCost::default(); + let storage = self + .db + .get_transactional_storage_context_by_subtree_prefix(prefix, batch, tx) + .unwrap_add_cost(&mut cost); + if root_key.is_some() { + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData( + "cannot open a subtree by prefix with given root key".to_owned(), + ) + }) + .add_cost(cost) + } else { + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a root subtree by prefix".to_owned())) + .add_cost(cost) + } + } + /// Opens a Merk at given path for with direct write access. Intended for /// replication purposes. fn open_merk_for_replication<'tx, 'db: 'tx, 'b, B>( @@ -337,7 +396,7 @@ impl GroveDb { path: SubtreePath<'b, B>, tx: &'tx Transaction<'db>, grove_version: &GroveVersion, - ) -> Result>, Error> + ) -> Result, Error> where B: AsRef<[u8]> + 'b, { @@ -364,31 +423,39 @@ impl GroveDb { .unwrap()?; let is_sum_tree = element.is_sum_tree(); if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { - Merk::open_layered_with_root_key( - storage, + Ok(( + Merk::open_layered_with_root_key( + storage, + root_key.clone(), + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + }) + .unwrap()?, root_key, is_sum_tree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .unwrap() + )) } else { Err(Error::CorruptedPath( "cannot open a subtree as parent exists but is not a tree".to_string(), )) } } else { - Merk::open_base( - storage, + Ok(( + Merk::open_base( + storage, + false, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) + .unwrap()?, + None, false, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .unwrap() + )) } } @@ -398,7 +465,7 @@ impl GroveDb { path: SubtreePath<'b, B>, batch: Option<&'db StorageBatch>, grove_version: &GroveVersion, - ) -> CostResult, Error> + ) -> CostResult>, Error> where B: AsRef<[u8]> + 'b, { @@ -458,6 +525,45 @@ impl GroveDb { } } + fn open_non_transactional_merk_by_prefix<'db>( + &'db self, + prefix: SubtreePrefix, + root_key: Option>, + is_sum_tree: bool, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, + ) -> CostResult>, Error> { + let mut cost = OperationCost::default(); + let storage = self + .db + .get_storage_context_by_subtree_prefix(prefix, batch) + .unwrap_add_cost(&mut cost); + if root_key.is_some() { + Merk::open_layered_with_root_key( + storage, + root_key, + is_sum_tree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| { + Error::CorruptedData( + "cannot open a subtree by prefix 
with given root key".to_owned(), + ) + }) + .add_cost(cost) + } else { + Merk::open_base( + storage, + false, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| Error::CorruptedData("cannot open a root subtree by prefix".to_owned())) + .add_cost(cost) + } + } + /// Creates a checkpoint pub fn create_checkpoint>(&self, path: P) -> Result<(), Error> { self.db.create_checkpoint(path).map_err(|e| e.into()) @@ -935,7 +1041,7 @@ impl GroveDb { verify_references: bool, allow_cache: bool, grove_version: &GroveVersion, - ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + ) -> Result { if let Some(transaction) = transaction { let root_merk = self .open_transactional_merk_at_path( @@ -979,7 +1085,7 @@ impl GroveDb { verify_references: bool, allow_cache: bool, grove_version: &GroveVersion, - ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + ) -> Result { let mut all_query = Query::new(); all_query.insert_all(); @@ -1123,7 +1229,7 @@ impl GroveDb { verify_references: bool, allow_cache: bool, grove_version: &GroveVersion, - ) -> Result>, (CryptoHash, CryptoHash, CryptoHash)>, Error> { + ) -> Result { let mut all_query = Query::new(); all_query.insert_all(); diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 876fe62c..1cd50519 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -1,268 +1,72 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - fmt, -}; - -use grovedb_merk::{ - ed::Encode, - merk::restore::Restorer, - proofs::{Decoder, Op}, - tree::{hash::CryptoHash, kv::ValueDefinedCostType, value_hash}, - ChunkProducer, -}; +mod state_sync_session; + +use std::pin::Pin; + +use grovedb_merk::{tree::hash::CryptoHash, ChunkProducer}; use grovedb_path::SubtreePath; -use grovedb_storage::rocksdb_storage::RocksDbStorage; -#[rustfmt::skip] -use grovedb_storage::rocksdb_storage::storage_context::context_immediate::PrefixedRocksDbImmediateStorageContext; use 
grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; -use crate::{replication, Error, GroveDb, Transaction, TransactionArg}; +pub use self::state_sync_session::MultiStateSyncSession; +use crate::{Error, GroveDb, TransactionArg}; -pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; +/// Type alias representing a chunk identifier in the state synchronization +/// process. +/// +/// - `SubtreePrefix`: The prefix of the subtree (32 bytes). +/// - `Option>`: The root key, which may be `None` if not present. +/// - `bool`: Indicates whether the tree is a sum tree. +/// - `Vec`: The chunk ID representing traversal instructions. +pub type ChunkIdentifier = (crate::SubtreePrefix, Option>, bool, Vec); pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; -#[derive(Default)] -struct SubtreeStateSyncInfo<'db> { - // Current Chunk restorer - restorer: Option>>, - // Set of global chunk ids requested to be fetched and pending for processing. For the - // description of global chunk id check fetch_chunk(). 
- pending_chunks: BTreeSet>, - // Number of processed chunks in current prefix (Path digest) - num_processed_chunks: usize, -} - -// Struct governing state sync -pub struct MultiStateSyncInfo<'db> { - // Map of current processing subtrees - // SubtreePrefix (Path digest) -> SubtreeStateSyncInfo - current_prefixes: BTreeMap>, - // Set of processed prefixes (Path digests) - processed_prefixes: BTreeSet, - // Root app_hash - app_hash: [u8; 32], - // Version of state sync protocol, - version: u16, -} - -impl<'db> Default for MultiStateSyncInfo<'db> { - fn default() -> Self { - Self { - current_prefixes: BTreeMap::new(), - processed_prefixes: BTreeSet::new(), - app_hash: [0; 32], - version: CURRENT_STATE_SYNC_VERSION, - } - } -} - -// Struct containing information about current subtrees found in GroveDB -pub struct SubtreesMetadata { - // Map of Prefix (Path digest) -> (Actual path, Parent Subtree actual_value_hash, Parent - // Subtree elem_value_hash) Note: Parent Subtree actual_value_hash, Parent Subtree - // elem_value_hash are needed when verifying the new constructed subtree after wards. 
- pub data: BTreeMap>, CryptoHash, CryptoHash)>, -} - -impl SubtreesMetadata { - pub fn new() -> SubtreesMetadata { - SubtreesMetadata { - data: BTreeMap::new(), - } - } -} - -impl Default for SubtreesMetadata { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for SubtreesMetadata { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for (prefix, metadata) in self.data.iter() { - let metadata_path = &metadata.0; - let metadata_path_str = util_path_to_string(metadata_path); - writeln!( - f, - " prefix:{:?} -> path:{:?}", - hex::encode(prefix), - metadata_path_str - )?; - } - Ok(()) - } -} - -// Converts a path into a human-readable string (for debugging) -pub fn util_path_to_string(path: &[Vec]) -> Vec { - let mut subtree_path_str: Vec = vec![]; - for subtree in path { - let string = std::str::from_utf8(subtree).expect("should be able to convert path"); - subtree_path_str.push( - string - .parse() - .expect("should be able to parse path to string"), - ); - } - subtree_path_str -} - -// Splits the given global chunk id into [SUBTREE_PREFIX:CHUNK_ID] -pub fn util_split_global_chunk_id( - global_chunk_id: &[u8], - app_hash: &[u8], -) -> Result<(crate::SubtreePrefix, Vec), Error> { - let chunk_prefix_length: usize = 32; - if global_chunk_id.len() < chunk_prefix_length { - return Err(Error::CorruptedData( - "expected global chunk id of at least 32 length".to_string(), - )); - } - - if global_chunk_id == app_hash { - let array_of_zeros: [u8; 32] = [0; 32]; - let root_chunk_prefix_key: crate::SubtreePrefix = array_of_zeros; - return Ok((root_chunk_prefix_key, vec![])); - } - - let (chunk_prefix, chunk_id) = global_chunk_id.split_at(chunk_prefix_length); - let mut array = [0u8; 32]; - array.copy_from_slice(chunk_prefix); - let chunk_prefix_key: crate::SubtreePrefix = array; - Ok((chunk_prefix_key, chunk_id.to_vec())) -} - -pub fn util_encode_vec_ops(chunk: Vec) -> Result, Error> { - let mut res = vec![]; - for op in chunk { - op.encode_into(&mut res) 
- .map_err(|e| Error::CorruptedData(format!("unable to encode chunk: {}", e)))?; +#[cfg(feature = "full")] +impl GroveDb { + pub fn start_syncing_session(&self, app_hash: [u8; 32]) -> Pin> { + MultiStateSyncSession::new(self.start_transaction(), app_hash) } - Ok(res) -} -pub fn util_decode_vec_ops(chunk: Vec) -> Result, Error> { - let decoder = Decoder::new(&chunk); - let mut res = vec![]; - for op in decoder { - match op { - Ok(op) => res.push(op), + pub fn commit_session(&self, session: Pin>) -> Result<(), Error> { + match self.commit_transaction(session.into_transaction()).value { + Ok(_) => Ok(()), Err(e) => { - return Err(Error::CorruptedData(format!( - "unable to decode chunk: {}", - e - ))); + // Log the error or handle it as needed + eprintln!("Failed to commit session: {:?}", e); + Err(e) } } } - Ok(res) -} -#[cfg(feature = "full")] -impl GroveDb { - // Returns the discovered subtrees found recursively along with their associated - // metadata Params: - // tx: Transaction. Function returns the data by opening merks at given tx. 
- // TODO: Add a SubTreePath as param and start searching from that path instead - // of root (as it is now) - pub fn get_subtrees_metadata( - &self, - tx: TransactionArg, - grove_version: &GroveVersion, - ) -> Result { - check_grovedb_v0!( - "is_empty_tree", - grove_version - .grovedb_versions - .replication - .get_subtrees_metadata - ); - let mut subtrees_metadata = SubtreesMetadata::new(); - - let subtrees_root = self - .find_subtrees(&SubtreePath::empty(), tx, grove_version) - .value?; - for subtree in subtrees_root.into_iter() { - let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); - let path: &[&[u8]] = &subtree_path; - let prefix = RocksDbStorage::build_prefix(path.as_ref().into()).unwrap(); - - let current_path = SubtreePath::from(path); - - match (current_path.derive_parent(), subtree.last()) { - (Some((parent_path, _)), Some(parent_key)) => match tx { - None => { - let parent_merk = self - .open_non_transactional_merk_at_path(parent_path, None, grove_version) - .value?; - if let Ok(Some((elem_value, elem_value_hash))) = parent_merk - .get_value_and_value_hash( - parent_key, - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .value - { - let actual_value_hash = value_hash(&elem_value).unwrap(); - subtrees_metadata.data.insert( - prefix, - (current_path.to_vec(), actual_value_hash, elem_value_hash), - ); - } - } - Some(t) => { - let parent_merk = self - .open_transactional_merk_at_path(parent_path, t, None, grove_version) - .value?; - if let Ok(Some((elem_value, elem_value_hash))) = parent_merk - .get_value_and_value_hash( - parent_key, - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .value - { - let actual_value_hash = value_hash(&elem_value).unwrap(); - subtrees_metadata.data.insert( - prefix, - (current_path.to_vec(), actual_value_hash, elem_value_hash), - ); - } - } - }, - _ => { - subtrees_metadata.data.insert( - prefix, - ( - current_path.to_vec(), - 
CryptoHash::default(), - CryptoHash::default(), - ), - ); - } - } - } - Ok(subtrees_metadata) - } - - // Fetch a chunk by global chunk id (should be called by ABCI when - // LoadSnapshotChunk method is called) Params: - // global_chunk_id: Global chunk id in the following format: - // [SUBTREE_PREFIX:CHUNK_ID] SUBTREE_PREFIX: 32 bytes (mandatory) (All zeros - // = Root subtree) CHUNK_ID: 0.. bytes (optional) Traversal instructions to - // the root of the given chunk. Traversal instructions are "1" for left, and - // "0" for right. TODO: Compact CHUNK_ID into bitset for size optimization - // as a subtree can be big hence traversal instructions for the deepest chunks - // tx: Transaction. Function returns the data by opening merks at given tx. - // Returns the Chunk proof operators for the requested chunk encoded in bytes + /// Fetch a chunk by global chunk ID (should be called by ABCI when the + /// `LoadSnapshotChunk` method is invoked). + /// + /// # Parameters + /// - `global_chunk_id`: Global chunk ID in the following format: + /// `[SUBTREE_PREFIX:SIZE_ROOT_KEY:ROOT_KEY:IS_SUM_TREE:CHUNK_ID]` + /// - **SUBTREE_PREFIX**: 32 bytes (mandatory) - All zeros indicate the + /// Root subtree. + /// - **SIZE_ROOT_KEY**: 1 byte - Size of `ROOT_KEY` in bytes. + /// - **ROOT_KEY**: `SIZE_ROOT_KEY` bytes (optional). + /// - **IS_SUM_TREE**: 1 byte (mandatory) - Marks if the tree is a sum + /// tree or not. + /// - **CHUNK_ID**: 0 or more bytes (optional) - Traversal instructions to + /// the root of the given chunk. Traversal instructions are represented + /// as "1" for left and "0" for right. + /// - TODO: Compact `CHUNK_ID` into a bitset for size optimization as a + /// subtree can be large, and traversal instructions for the deepest + /// chunks could consume significant space. + /// + /// - `transaction`: The transaction used to fetch the chunk. + /// - `version`: The version of the state sync protocol. + /// - `grove_version`: The version of GroveDB. 
+ /// + /// # Returns + /// Returns the chunk proof operators for the requested chunk, encoded as + /// bytes. pub fn fetch_chunk( &self, global_chunk_id: &[u8], - tx: TransactionArg, + transaction: TransactionArg, version: u16, grove_version: &GroveVersion, ) -> Result, Error> { @@ -277,99 +81,147 @@ impl GroveDb { )); } - let root_app_hash = self.root_hash(tx, grove_version).value?; - let (chunk_prefix, chunk_id) = - replication::util_split_global_chunk_id(global_chunk_id, &root_app_hash)?; - - let subtrees_metadata = self.get_subtrees_metadata(tx, grove_version)?; - - match subtrees_metadata.data.get(&chunk_prefix) { - Some(path_data) => { - let subtree = &path_data.0; - let subtree_path: Vec<&[u8]> = subtree.iter().map(|vec| vec.as_slice()).collect(); - let path: &[&[u8]] = &subtree_path; - - match tx { - None => { - let merk = self - .open_non_transactional_merk_at_path(path.into(), None, grove_version) - .value?; - - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } - - let chunk_producer_res = ChunkProducer::new(&merk); - match chunk_producer_res { - Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); - match chunk_res { - Ok((chunk, _)) => match util_encode_vec_ops(chunk) { - Ok(op_bytes) => Ok(op_bytes), - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - }, - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - } - } - Err(_) => Err(Error::CorruptedData( - "Unable to create Chunk producer".to_string(), - )), - } - } - Some(t) => { - let merk = self - .open_transactional_merk_at_path(path.into(), t, None, grove_version) - .value?; - - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } - - let chunk_producer_res = ChunkProducer::new(&merk); - match chunk_producer_res { - Ok(mut chunk_producer) => { - let chunk_res = chunk_producer.chunk(&chunk_id, grove_version); - match chunk_res { - Ok((chunk, _)) => match 
util_encode_vec_ops(chunk) { - Ok(op_bytes) => Ok(op_bytes), - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - }, - Err(_) => Err(Error::CorruptedData( - "Unable to create to load chunk".to_string(), - )), - } - } - Err(_) => Err(Error::CorruptedData( - "Unable to create Chunk producer".to_string(), - )), - } - } - } + let root_app_hash = self.root_hash(transaction, grove_version).value?; + let (chunk_prefix, root_key, is_sum_tree, chunk_id) = + utils::decode_global_chunk_id(global_chunk_id, &root_app_hash)?; + + // TODO: Refactor this by writing fetch_chunk_inner (as only merk constructor + // and type are different) + if let Some(tx) = transaction { + let merk = self + .open_transactional_merk_by_prefix( + chunk_prefix, + root_key, + is_sum_tree, + tx, + None, + grove_version, + ) + .value + .map_err(|e| { + Error::CorruptedData(format!( + "failed to open merk by prefix tx:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); } - None => Err(Error::CorruptedData("Prefix not found".to_string())), + + let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { + Error::CorruptedData(format!( + "failed to create chunk producer by prefix tx:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let (chunk, _) = chunk_producer + .chunk(&chunk_id, grove_version) + .map_err(|e| { + Error::CorruptedData(format!( + "failed to apply chunk:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + Error::CorruptedData(format!( + "failed to encode chunk ops:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + Ok(op_bytes) + } else { + let merk = self + .open_non_transactional_merk_by_prefix( + chunk_prefix, + root_key, + is_sum_tree, + None, + grove_version, + ) + .value + .map_err(|e| { + Error::CorruptedData(format!( + "failed to open merk by prefix non-tx:{} with:{}", + e, + 
hex::encode(chunk_prefix) + )) + })?; + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } + + let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { + Error::CorruptedData(format!( + "failed to create chunk producer by prefix non-tx:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let (chunk, _) = chunk_producer + .chunk(&chunk_id, grove_version) + .map_err(|e| { + Error::CorruptedData(format!( + "failed to apply chunk:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + Error::CorruptedData(format!( + "failed to encode chunk ops:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + Ok(op_bytes) } } - // Starts a state sync process (should be called by ABCI when OfferSnapshot - // method is called) Params: - // state_sync_info: Consumed StateSyncInfo - // app_hash: Snapshot's AppHash - // tx: Transaction for the state sync - // Returns the StateSyncInfo transferring ownership back to the caller) - pub fn start_snapshot_syncing<'db>( - &'db self, - mut state_sync_info: MultiStateSyncInfo<'db>, + /// Starts a state synchronization process for a snapshot with the given + /// `app_hash` root hash. This method should be called by ABCI when the + /// `OfferSnapshot` method is invoked. + /// + /// # Parameters + /// - `app_hash`: The root hash of the application state to synchronize. + /// - `version`: The version of the state sync protocol to use. + /// - `grove_version`: The version of GroveDB being used. + /// + /// # Returns + /// - `Ok(Pin>)`: A pinned, boxed + /// `MultiStateSyncSession` representing the new sync session. This + /// session allows for managing the synchronization process. + /// - `Err(Error)`: An error indicating why the state sync process could not + /// be started. + /// + /// # Behavior + /// - Initiates the state synchronization process by preparing the necessary + /// data and resources. 
+ /// - Returns the first set of global chunk IDs that can be fetched from + /// available sources. + /// - A new sync session is created and managed internally, facilitating + /// further synchronization. + /// + /// # Usage + /// This method is typically called as part of the ABCI `OfferSnapshot` + /// workflow when a new snapshot synchronization process is required to + /// bring the application state up to date. + /// + /// # Notes + /// - The returned `MultiStateSyncSession` is pinned because its lifetime + /// may depend on asynchronous operations or other system resources that + /// require it to remain immovable in memory. + /// - Ensure that `app_hash` corresponds to a valid snapshot to avoid + /// errors. + pub fn start_snapshot_syncing( + &self, app_hash: CryptoHash, - tx: &'db Transaction, version: u16, grove_version: &GroveVersion, - ) -> Result { + ) -> Result>, Error> { check_grovedb_v0!( "start_snapshot_syncing", grove_version @@ -383,277 +235,216 @@ impl GroveDb { "Unsupported state sync protocol version".to_string(), )); } - if version != state_sync_info.version { - return Err(Error::CorruptedData( - "Unsupported state sync protocol version".to_string(), - )); + + let root_prefix = [0u8; 32]; + + let mut session = self.start_syncing_session(app_hash); + + session.add_subtree_sync_info( + self, + SubtreePath::empty(), + app_hash, + None, + root_prefix, + grove_version, + )?; + + Ok(session) + } +} + +pub(crate) mod utils { + use grovedb_merk::{ + ed::Encode, + proofs::{Decoder, Op}, + }; + + use crate::{replication::ChunkIdentifier, Error}; + + /// Converts a path, represented as a slice of byte vectors (`&[Vec]`), + /// into a human-readable string representation for debugging purposes. + /// + /// # Parameters + /// - `path`: A slice of byte vectors where each vector represents a segment + /// of the path. 
+ /// + /// # Returns + /// - `Vec`: A vector of strings where each string is a + /// human-readable representation of a corresponding segment in the input + /// path. If a segment contains invalid UTF-8, it is replaced with the + /// placeholder string `""`. + /// + /// # Behavior + /// - Each byte vector in the path is interpreted as a UTF-8 string. If the + /// conversion fails, the placeholder `""` is used instead. + /// - This function is primarily intended for debugging and logging. + /// + /// # Notes + /// - This function does not handle or normalize paths; it only provides a + /// human-readable representation. + /// - Be cautious when using this for paths that might contain sensitive + /// data, as the output could be logged. + pub fn path_to_string(path: &[Vec]) -> Vec { + let mut subtree_path_str: Vec = vec![]; + for subtree in path { + let string = std::str::from_utf8(subtree).unwrap_or(""); + subtree_path_str.push(string.to_string()); } + subtree_path_str + } - if !state_sync_info.current_prefixes.is_empty() - || !state_sync_info.processed_prefixes.is_empty() - { - return Err(Error::InternalError( - "GroveDB has already started a snapshot syncing".to_string(), + /// Decodes a given global chunk ID into its components: + /// `[SUBTREE_PREFIX:SIZE_ROOT_KEY:ROOT_KEY:IS_SUM_TREE:CHUNK_ID]`. + /// + /// # Parameters + /// - `global_chunk_id`: A byte slice representing the global chunk ID to + /// decode. + /// - `app_hash`: The application hash, which may be required for validation + /// or context. + /// + /// # Returns + /// - `Ok(ChunkIdentifier)`: A tuple containing the decoded components: + /// - `SUBTREE_PREFIX`: A 32-byte prefix of the subtree. + /// - `SIZE_ROOT_KEY`: Size of the root key (derived from `ROOT_KEY` + /// length). + /// - `ROOT_KEY`: Optional root key as a byte vector. + /// - `IS_SUM_TREE`: A boolean indicating whether the tree is a sum tree. + /// - `CHUNK_ID`: Traversal instructions as a byte vector. 
+ /// - `Err(Error)`: An error if the global chunk ID could not be decoded. + pub fn decode_global_chunk_id( + global_chunk_id: &[u8], + app_hash: &[u8], + ) -> Result { + let chunk_prefix_length: usize = 32; + if global_chunk_id.len() < chunk_prefix_length { + return Err(Error::CorruptedData( + "expected global chunk id of at least 32 length".to_string(), )); } - println!( - " starting:{:?}...", - replication::util_path_to_string(&[]) - ); - - let mut root_prefix_state_sync_info = SubtreeStateSyncInfo::default(); - let root_prefix = [0u8; 32]; - if let Ok(merk) = self.open_merk_for_replication(SubtreePath::empty(), tx, grove_version) { - let restorer = Restorer::new(merk, app_hash, None); - root_prefix_state_sync_info.restorer = Some(restorer); - root_prefix_state_sync_info.pending_chunks.insert(vec![]); - state_sync_info - .current_prefixes - .insert(root_prefix, root_prefix_state_sync_info); - state_sync_info.app_hash = app_hash; - } else { - return Err(Error::InternalError( - "Unable to open merk for replication".to_string(), - )); + if global_chunk_id == app_hash { + let root_chunk_prefix_key: crate::SubtreePrefix = [0u8; 32]; + return Ok((root_chunk_prefix_key, None, false, vec![])); } - Ok(state_sync_info) - } + let (chunk_prefix_key, remaining) = global_chunk_id.split_at(chunk_prefix_length); - // Apply a chunk (should be called by ABCI when ApplySnapshotChunk method is - // called) Params: - // state_sync_info: Consumed MultiStateSyncInfo - // global_chunk_id: Global chunk id - // chunk: Chunk proof operators encoded in bytes - // tx: Transaction for the state sync - // Returns the next set of global chunk ids that can be fetched from sources (+ - // the MultiStateSyncInfo transferring ownership back to the caller) - pub fn apply_chunk<'db>( - &'db self, - mut state_sync_info: MultiStateSyncInfo<'db>, - global_chunk_id: &[u8], - chunk: Vec, - tx: &'db Transaction, - version: u16, - grove_version: &GroveVersion, - ) -> Result<(Vec>, MultiStateSyncInfo), 
Error> { - check_grovedb_v0!( - "apply_chunk", - grove_version.grovedb_versions.replication.apply_chunk - ); - // For now, only CURRENT_STATE_SYNC_VERSION is supported - if version != CURRENT_STATE_SYNC_VERSION { + let root_key_size_length: usize = 1; + if remaining.len() < root_key_size_length { return Err(Error::CorruptedData( - "Unsupported state sync protocol version".to_string(), + "unable to decode root key size".to_string(), )); } - if version != state_sync_info.version { + let (root_key_size, remaining) = remaining.split_at(root_key_size_length); + if remaining.len() < root_key_size[0] as usize { return Err(Error::CorruptedData( - "Unsupported state sync protocol version".to_string(), + "unable to decode root key".to_string(), )); } - - let mut next_chunk_ids = vec![]; - - let (chunk_prefix, chunk_id) = - replication::util_split_global_chunk_id(global_chunk_id, &state_sync_info.app_hash)?; - - if state_sync_info.current_prefixes.is_empty() { - return Err(Error::InternalError( - "GroveDB is not in syncing mode".to_string(), + let (root_key, remaining) = remaining.split_at(root_key_size[0] as usize); + let is_sum_tree_length: usize = 1; + if remaining.len() < is_sum_tree_length { + return Err(Error::CorruptedData( + "unable to decode root key".to_string(), )); } - if let Some(subtree_state_sync) = state_sync_info.current_prefixes.remove(&chunk_prefix) { - if let Ok((res, mut new_subtree_state_sync)) = - self.apply_inner_chunk(subtree_state_sync, &chunk_id, chunk, grove_version) - { - if !res.is_empty() { - for local_chunk_id in res.iter() { - let mut next_global_chunk_id = chunk_prefix.to_vec(); - next_global_chunk_id.extend(local_chunk_id.to_vec()); - next_chunk_ids.push(next_global_chunk_id); - } - - // re-insert subtree_state_sync in state_sync_info - state_sync_info - .current_prefixes - .insert(chunk_prefix, new_subtree_state_sync); - Ok((next_chunk_ids, state_sync_info)) - } else { - if !new_subtree_state_sync.pending_chunks.is_empty() { - // re-insert 
subtree_state_sync in state_sync_info - state_sync_info - .current_prefixes - .insert(chunk_prefix, new_subtree_state_sync); - return Ok((vec![], state_sync_info)); - } - - // Subtree is finished. We can save it. - match new_subtree_state_sync.restorer.take() { - None => Err(Error::InternalError( - "Unable to finalize subtree".to_string(), - )), - Some(restorer) => { - if (new_subtree_state_sync.num_processed_chunks > 0) - && (restorer.finalize(grove_version).is_err()) - { - return Err(Error::InternalError( - "Unable to finalize Merk".to_string(), - )); - } - state_sync_info.processed_prefixes.insert(chunk_prefix); - - // Subtree was successfully save. Time to discover new subtrees that - // need to be processed - let subtrees_metadata = - self.get_subtrees_metadata(Some(tx), grove_version)?; - if let Some(value) = subtrees_metadata.data.get(&chunk_prefix) { - println!( - " path:{:?} done (num_processed_chunks:{:?})", - replication::util_path_to_string(&value.0), - new_subtree_state_sync.num_processed_chunks - ); - } - - if let Ok((res, new_state_sync_info)) = self.discover_subtrees( - state_sync_info, - subtrees_metadata, - tx, - grove_version, - ) { - next_chunk_ids.extend(res); - Ok((next_chunk_ids, new_state_sync_info)) - } else { - Err(Error::InternalError( - "Unable to discover Subtrees".to_string(), - )) - } - } - } - } - } else { - Err(Error::InternalError( - "Unable to process incoming chunk".to_string(), - )) - } + let (is_sum_tree, chunk_id) = remaining.split_at(is_sum_tree_length); + + let subtree_prefix: crate::SubtreePrefix = chunk_prefix_key + .try_into() + .map_err(|_| Error::CorruptedData("unable to construct subtree".to_string()))?; + + if !root_key.is_empty() { + Ok(( + subtree_prefix, + Some(root_key.to_vec()), + is_sum_tree[0] != 0, + chunk_id.to_vec(), + )) } else { - Err(Error::InternalError("Invalid incoming prefix".to_string())) + Ok((subtree_prefix, None, is_sum_tree[0] != 0, chunk_id.to_vec())) } } - // Apply a chunk using the given 
SubtreeStateSyncInfo - // state_sync_info: Consumed SubtreeStateSyncInfo - // chunk_id: Local chunk id - // chunk_data: Chunk proof operators encoded in bytes - // Returns the next set of global chunk ids that can be fetched from sources (+ - // the SubtreeStateSyncInfo transferring ownership back to the caller) - fn apply_inner_chunk<'db>( - &'db self, - mut state_sync_info: SubtreeStateSyncInfo<'db>, - chunk_id: &[u8], - chunk_data: Vec, - grove_version: &GroveVersion, - ) -> Result<(Vec>, SubtreeStateSyncInfo), Error> { + /// Encodes the given components into a global chunk ID in the format: + /// `[SUBTREE_PREFIX:SIZE_ROOT_KEY:ROOT_KEY:IS_SUM_TREE:CHUNK_ID]`. + /// + /// # Parameters + /// - `subtree_prefix`: A 32-byte array representing the prefix of the + /// subtree. + /// - `root_key_opt`: An optional root key as a byte vector. + /// - `is_sum_tree`: A boolean indicating whether the tree is a sum tree. + /// - `chunk_id`: A byte vector representing the traversal instructions. + /// + /// # Returns + /// - A `Vec` containing the encoded global chunk ID. 
+ pub fn encode_global_chunk_id( + subtree_prefix: [u8; blake3::OUT_LEN], + root_key_opt: Option>, + is_sum_tree: bool, + chunk_id: Vec, + ) -> Vec { let mut res = vec![]; - match &mut state_sync_info.restorer { - Some(restorer) => { - if !state_sync_info.pending_chunks.contains(chunk_id) { - return Err(Error::InternalError( - "Incoming global_chunk_id not expected".to_string(), - )); - } - state_sync_info.pending_chunks.remove(chunk_id); - if !chunk_data.is_empty() { - match util_decode_vec_ops(chunk_data) { - Ok(ops) => { - match restorer.process_chunk(chunk_id, ops, grove_version) { - Ok(next_chunk_ids) => { - state_sync_info.num_processed_chunks += 1; - for next_chunk_id in next_chunk_ids { - state_sync_info - .pending_chunks - .insert(next_chunk_id.clone()); - res.push(next_chunk_id); - } - } - _ => { - return Err(Error::InternalError( - "Unable to process incoming chunk".to_string(), - )); - } - }; - } - Err(_) => { - return Err(Error::CorruptedData( - "Unable to decode incoming chunk".to_string(), - )); - } - } - } - } - _ => { - return Err(Error::InternalError( - "Invalid internal state (restorer".to_string(), - )); - } + res.extend(subtree_prefix); + + if let Some(root_key) = root_key_opt { + res.push(root_key.len() as u8); + res.extend(root_key); + } else { + res.push(0u8); + } + + let mut is_sum_tree_v = 0u8; + if is_sum_tree { + is_sum_tree_v = 1u8; } + res.push(is_sum_tree_v); + + res.extend(chunk_id.to_vec()); - Ok((res, state_sync_info)) + res } - // Prepares SubtreeStateSyncInfos for the freshly discovered subtrees in - // subtrees_metadata and returns the root global chunk ids for all of those - // new subtrees. 
state_sync_info: Consumed MultiStateSyncInfo - // subtrees_metadata: Metadata about discovered subtrees - // chunk_data: Chunk proof operators - // Returns the next set of global chunk ids that can be fetched from sources (+ - // the MultiStateSyncInfo transferring ownership back to the caller) - fn discover_subtrees<'db>( - &'db self, - mut state_sync_info: MultiStateSyncInfo<'db>, - subtrees_metadata: SubtreesMetadata, - tx: &'db Transaction, - grove_version: &GroveVersion, - ) -> Result<(Vec>, MultiStateSyncInfo), Error> { + /// Encodes a vector of operations (`Vec`) into a byte vector. + /// + /// # Parameters + /// - `chunk`: A vector of `Op` operations to be encoded. + /// + /// # Returns + /// - `Ok(Vec)`: A byte vector representing the encoded operations. + /// - `Err(Error)`: An error if the encoding process fails. + pub fn encode_vec_ops(chunk: Vec) -> Result, Error> { let mut res = vec![]; + for op in chunk { + op.encode_into(&mut res) + .map_err(|e| Error::CorruptedData(format!("unable to encode chunk: {}", e)))?; + } + Ok(res) + } - for (prefix, prefix_metadata) in &subtrees_metadata.data { - if !state_sync_info.processed_prefixes.contains(prefix) - && !state_sync_info.current_prefixes.contains_key(prefix) - { - let (current_path, s_actual_value_hash, s_elem_value_hash) = &prefix_metadata; - - let subtree_path: Vec<&[u8]> = - current_path.iter().map(|vec| vec.as_slice()).collect(); - let path: &[&[u8]] = &subtree_path; - println!( - " path:{:?} starting...", - replication::util_path_to_string(&prefix_metadata.0) - ); - - let mut subtree_state_sync_info = SubtreeStateSyncInfo::default(); - if let Ok(merk) = self.open_merk_for_replication(path.into(), tx, grove_version) { - let restorer = - Restorer::new(merk, *s_elem_value_hash, Some(*s_actual_value_hash)); - subtree_state_sync_info.restorer = Some(restorer); - subtree_state_sync_info.pending_chunks.insert(vec![]); - - state_sync_info - .current_prefixes - .insert(*prefix, subtree_state_sync_info); - - 
let root_chunk_prefix = prefix.to_vec(); - res.push(root_chunk_prefix.to_vec()); - } else { - return Err(Error::InternalError( - "Unable to open Merk for replication".to_string(), - )); + /// Decodes a byte vector into a vector of operations (`Vec`). + /// + /// # Parameters + /// - `chunk`: A byte vector representing encoded operations. + /// + /// # Returns + /// - `Ok(Vec)`: A vector of decoded `Op` operations. + /// - `Err(Error)`: An error if the decoding process fails. + pub fn decode_vec_ops(chunk: Vec) -> Result, Error> { + let decoder = Decoder::new(&chunk); + let mut res = vec![]; + for op in decoder { + match op { + Ok(op) => res.push(op), + Err(e) => { + return Err(Error::CorruptedData(format!( + "unable to decode chunk: {}", + e + ))); } } } - - Ok((res, state_sync_info)) + Ok(res) } } diff --git a/grovedb/src/replication/state_sync_session.rs b/grovedb/src/replication/state_sync_session.rs new file mode 100644 index 00000000..59d93316 --- /dev/null +++ b/grovedb/src/replication/state_sync_session.rs @@ -0,0 +1,618 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt, + marker::PhantomPinned, + pin::Pin, +}; + +use grovedb_merk::{ + tree::{kv::ValueDefinedCostType, value_hash}, + CryptoHash, Restorer, +}; +use grovedb_path::SubtreePath; +use grovedb_storage::{ + rocksdb_storage::{PrefixedRocksDbImmediateStorageContext, RocksDbStorage}, + StorageContext, +}; +use grovedb_version::version::GroveVersion; + +use super::{ + utils::{decode_vec_ops, encode_global_chunk_id, path_to_string}, + CURRENT_STATE_SYNC_VERSION, +}; +use crate::{replication, Element, Error, GroveDb, Transaction}; + +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + +/// Struct governing the state synchronization of one subtree. +struct SubtreeStateSyncInfo<'db> { + /// Current Chunk restorer + restorer: Restorer>, + + /// Set of global chunk ids requested to be fetched and pending for + /// processing. For the description of global chunk id check + /// fetch_chunk(). 
+ pending_chunks: BTreeSet>, + + /// Tree root key + root_key: Option>, + + /// Is Sum tree? + is_sum_tree: bool, + + /// Path of current tree + current_path: Vec>, + + /// Number of processed chunks in current prefix (Path digest) + num_processed_chunks: usize, +} + +impl SubtreeStateSyncInfo<'_> { + /// Applies a chunk using the given `SubtreeStateSyncInfo`. + /// + /// # Parameters + /// - `chunk_id`: A byte slice representing the local chunk ID to be + /// applied. + /// - `chunk_data`: A vector of bytes containing the chunk proof operators, + /// encoded as bytes. + /// - `grove_version`: A reference to the `GroveVersion` being used for + /// synchronization. + /// + /// # Returns + /// - `Ok(Vec>)`: A vector of global chunk IDs (each represented as + /// a vector of bytes) that can be fetched from sources for further + /// synchronization. Ownership of the `SubtreeStateSyncInfo` is + /// transferred back to the caller. + /// - `Err(Error)`: An error if the chunk cannot be applied. + /// + /// # Behavior + /// - The function consumes the provided `SubtreeStateSyncInfo` to apply the + /// given chunk. + /// - Once the chunk is applied, the function calculates and returns the + /// next set of global chunk IDs required for further state + /// synchronization. + /// + /// # Usage + /// This function is called as part of the state sync process to apply + /// received chunks and advance the synchronization state. + /// + /// # Notes + /// - Ensure that the `chunk_data` is correctly encoded and matches the + /// expected format. + /// - The function modifies the state of the synchronization process, so it + /// must be used carefully to maintain correctness. 
+ fn apply_inner_chunk( + &mut self, + chunk_id: &[u8], + chunk_data: Vec, + grove_version: &GroveVersion, + ) -> Result>, Error> { + let mut res = vec![]; + + if !self.pending_chunks.contains(chunk_id) { + return Err(Error::InternalError( + "Incoming global_chunk_id not expected".to_string(), + )); + } + self.pending_chunks.remove(chunk_id); + if !chunk_data.is_empty() { + match decode_vec_ops(chunk_data) { + Ok(ops) => { + match self.restorer.process_chunk(chunk_id, ops, grove_version) { + Ok(next_chunk_ids) => { + self.num_processed_chunks += 1; + for next_chunk_id in next_chunk_ids { + self.pending_chunks.insert(next_chunk_id.clone()); + res.push(next_chunk_id); + } + } + _ => { + return Err(Error::InternalError( + "Unable to process incoming chunk".to_string(), + )); + } + }; + } + Err(_) => { + return Err(Error::CorruptedData( + "Unable to decode incoming chunk".to_string(), + )); + } + } + } + + Ok(res) + } +} + +impl<'tx> SubtreeStateSyncInfo<'tx> { + pub fn new(restorer: Restorer>) -> Self { + SubtreeStateSyncInfo { + restorer, + root_key: None, + is_sum_tree: false, + pending_chunks: Default::default(), + current_path: vec![], + num_processed_chunks: 0, + } + } +} + +/// Struct governing the state synchronization process. +pub struct MultiStateSyncSession<'db> { + /// Map of currently processing subtrees. + /// Keys are `SubtreePrefix` (path digests), and values are + /// `SubtreeStateSyncInfo` for each subtree. + current_prefixes: BTreeMap>, + + /// Set of processed prefixes, represented as `SubtreePrefix` (path + /// digests). + processed_prefixes: BTreeSet, + + /// Root application hash (`app_hash`). + app_hash: [u8; 32], + + /// Version of the state synchronization protocol. + pub(crate) version: u16, + + /// Transaction used for the synchronization process. + /// This is placed last to ensure it is dropped last. + transaction: Transaction<'db>, + + /// Marker to ensure this struct is not moved in memory. 
+ _pin: PhantomPinned, +} + +impl<'db> MultiStateSyncSession<'db> { + /// Initializes a new state sync session. + pub fn new(transaction: Transaction<'db>, app_hash: [u8; 32]) -> Pin> { + Box::pin(MultiStateSyncSession { + transaction, + current_prefixes: Default::default(), + processed_prefixes: Default::default(), + app_hash, + version: CURRENT_STATE_SYNC_VERSION, + _pin: PhantomPinned, + }) + } + + pub fn is_empty(&self) -> bool { + self.current_prefixes.is_empty() + } + + pub fn is_sync_completed(&self) -> bool { + for (_, subtree_state_info) in self.current_prefixes.iter() { + if !subtree_state_info.pending_chunks.is_empty() { + return false; + } + } + + true + } + + pub fn into_transaction(self: Pin>) -> Transaction<'db> { + // SAFETY: the struct isn't used anymore and no one will refer to transaction + // address again + unsafe { Pin::into_inner_unchecked(self) }.transaction + } + + /// Adds synchronization information for a subtree into the current + /// synchronization session. + /// + /// This function interacts with a `GroveDb` database to open a Merk tree at + /// the specified path, calculate and verify its cryptographic hashes, + /// and update the session state with the relevant synchronization + /// information. The function generates and returns the global chunk ID for + /// the subtree. + /// + /// # Parameters + /// - `self`: A pinned, boxed instance of the `MultiStateSyncSession`. + /// - `db`: A reference to the `GroveDb` instance. + /// - `path`: The path to the subtree as a `SubtreePath`. + /// - `hash`: The expected cryptographic hash of the subtree. + /// - `actual_hash`: An optional actual cryptographic hash to compare + /// against the expected hash. + /// - `chunk_prefix`: A 32-byte prefix used for identifying chunks in the + /// synchronization process. + /// - `grove_version`: The GroveDB version to use for processing. + /// + /// # Returns + /// - `Ok(Vec)`: On success, returns the encoded global chunk ID for the + /// subtree. 
+ /// - `Err(Error)`: If the Merk tree cannot be opened or synchronization + /// information cannot be added. + /// + /// # Errors + /// This function returns an error if: + /// - The Merk tree at the specified path cannot be opened. + /// - Any synchronization-related operations fail. + /// - Internal errors occur during processing. + /// + /// # Safety + /// - This function uses unsafe code to create a reference to the + /// transaction. Ensure that the transaction is properly managed and the + /// lifetime guarantees are respected. + pub fn add_subtree_sync_info<'b, B: AsRef<[u8]>>( + self: &mut Pin>>, + db: &'db GroveDb, + path: SubtreePath<'b, B>, + hash: CryptoHash, + actual_hash: Option, + chunk_prefix: [u8; 32], + grove_version: &GroveVersion, + ) -> Result, Error> { + let transaction_ref: &'db Transaction<'db> = unsafe { + let tx: &Transaction<'db> = &self.as_ref().transaction; + &*(tx as *const _) + }; + + if let Ok((merk, root_key, is_sum_tree)) = + db.open_merk_for_replication(path.clone(), transaction_ref, grove_version) + { + let restorer = Restorer::new(merk, hash, actual_hash); + let mut sync_info = SubtreeStateSyncInfo::new(restorer); + sync_info.pending_chunks.insert(vec![]); + sync_info.root_key = root_key.clone(); + sync_info.is_sum_tree = is_sum_tree; + sync_info.current_path = path.to_vec(); + self.as_mut() + .current_prefixes() + .insert(chunk_prefix, sync_info); + Ok(encode_global_chunk_id( + chunk_prefix, + root_key, + is_sum_tree, + vec![], + )) + } else { + Err(Error::InternalError( + "Unable to open merk for replication".to_string(), + )) + } + } + + fn current_prefixes( + self: Pin<&mut MultiStateSyncSession<'db>>, + ) -> &mut BTreeMap> { + // SAFETY: no memory-sensitive assumptions are made about fields except the + // `transaction` so it will be safe to modify them + &mut unsafe { self.get_unchecked_mut() }.current_prefixes + } + + fn processed_prefixes( + self: Pin<&mut MultiStateSyncSession<'db>>, + ) -> &mut BTreeSet { + // SAFETY: 
no memory-sensitive assumptions are made about fields except the + // `transaction` so it will be safe to modify them + &mut unsafe { self.get_unchecked_mut() }.processed_prefixes + } + + /// Applies a chunk during the state synchronization process. + /// This method should be called by ABCI when the `ApplySnapshotChunk` + /// method is invoked. + /// + /// # Parameters + /// - `self`: A pinned mutable reference to the `MultiStateSyncSession`. + /// - `db`: A reference to the `GroveDb` instance used for synchronization. + /// - `global_chunk_id`: A byte slice representing the global chunk ID being + /// applied. + /// - `chunk`: A vector of bytes containing the encoded proof for the chunk. + /// - `version`: The state synchronization protocol version being used. + /// - `grove_version`: A reference to the `GroveVersion` specifying the + /// GroveDB version. + /// + /// # Returns + /// - `Ok(Vec>)`: A vector of global chunk IDs (each represented as + /// a vector of bytes) that can be fetched from sources for further + /// synchronization. + /// - `Err(Error)`: An error if the chunk application fails or if the chunk + /// proof is invalid. + /// + /// # Behavior + /// - This method applies the given chunk using the provided + /// `global_chunk_id` and its corresponding proof data (`chunk`). + /// - Once the chunk is applied successfully, it calculates and returns the + /// next set of global chunk IDs required for further synchronization. + /// + /// # Notes + /// - Ensure the `chunk` is correctly encoded and matches the expected proof + /// format. + /// - This function modifies the state of the synchronization session, so it + /// must be used carefully to maintain correctness and avoid errors. + /// - The pinned `self` ensures that the session cannot be moved in memory, + /// preserving consistency during the synchronization process. 
+ pub fn apply_chunk( + self: &mut Pin>>, + db: &'db GroveDb, + global_chunk_id: &[u8], + chunk: Vec, + version: u16, + grove_version: &GroveVersion, + ) -> Result>, Error> { + // For now, only CURRENT_STATE_SYNC_VERSION is supported + if version != CURRENT_STATE_SYNC_VERSION { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + if version != self.version { + return Err(Error::CorruptedData( + "Unsupported state sync protocol version".to_string(), + )); + } + + let mut next_chunk_ids = vec![]; + + let (chunk_prefix, _, _, chunk_id) = + replication::utils::decode_global_chunk_id(global_chunk_id, &self.app_hash)?; + + if self.is_empty() { + return Err(Error::InternalError( + "GroveDB is not in syncing mode".to_string(), + )); + } + + let current_prefixes = self.as_mut().current_prefixes(); + let Some(subtree_state_sync) = current_prefixes.get_mut(&chunk_prefix) else { + return Err(Error::InternalError( + "Unable to process incoming chunk".to_string(), + )); + }; + let Ok(res) = subtree_state_sync.apply_inner_chunk(&chunk_id, chunk, grove_version) else { + return Err(Error::InternalError("Invalid incoming prefix".to_string())); + }; + + if !res.is_empty() { + for local_chunk_id in res.iter() { + next_chunk_ids.push(encode_global_chunk_id( + chunk_prefix, + subtree_state_sync.root_key.clone(), + subtree_state_sync.is_sum_tree, + local_chunk_id.clone(), + )); + } + + Ok(next_chunk_ids) + } else { + if !subtree_state_sync.pending_chunks.is_empty() { + return Ok(vec![]); + } + + let completed_path = subtree_state_sync.current_path.clone(); + + // Subtree is finished. We can save it. 
+ if subtree_state_sync.num_processed_chunks > 0 { + if let Some(prefix_data) = current_prefixes.remove(&chunk_prefix) { + if let Err(err) = prefix_data.restorer.finalize(grove_version) { + return Err(Error::InternalError(format!( + "Unable to finalize Merk: {:?}", + err + ))); + } + } else { + return Err(Error::InternalError(format!( + "Prefix {:?} does not exist in current_prefixes", + chunk_prefix + ))); + } + } + + self.as_mut().processed_prefixes().insert(chunk_prefix); + + let new_subtrees_metadata = + self.discover_new_subtrees_metadata(db, &completed_path, grove_version)?; + + if let Ok(res) = + self.prepare_sync_state_sessions(db, new_subtrees_metadata, grove_version) + { + next_chunk_ids.extend(res); + Ok(next_chunk_ids) + } else { + Err(Error::InternalError( + "Unable to discover Subtrees".to_string(), + )) + } + } + } + + /// Discovers new subtrees at the given path that need to be synchronized. + /// + /// # Parameters + /// - `self`: A pinned mutable reference to the `MultiStateSyncSession`. + /// - `db`: A reference to the `GroveDb` instance being used for + /// synchronization. + /// - `path_vec`: A vector of byte vectors representing the path where + /// subtrees should be discovered. + /// - `grove_version`: A reference to the `GroveVersion` specifying the + /// GroveDB version. + /// + /// # Returns + /// - `Ok(SubtreesMetadata)`: Metadata about the discovered subtrees, + /// including information necessary for their synchronization. + /// - `Err(Error)`: An error if the discovery process fails. + /// + /// # Behavior + /// - This function traverses the specified `path_vec` in the database and + /// identifies subtrees that are not yet synchronized. + /// - Returns metadata about these subtrees, which can be used to initiate + /// or manage the synchronization process. + /// + /// # Notes + /// - The `path_vec` should represent a valid path in the GroveDB where + /// subtrees are expected to exist. 
+ /// - Ensure that the GroveDB instance (`db`) and Grove version + /// (`grove_version`) are compatible and up-to-date to avoid errors during + /// discovery. + /// - The function modifies the state of the synchronization session, so it + /// should be used carefully to maintain session integrity. + fn discover_new_subtrees_metadata( + self: &mut Pin>>, + db: &'db GroveDb, + path_vec: &[Vec], + grove_version: &GroveVersion, + ) -> Result { + let transaction_ref: &'db Transaction<'db> = unsafe { + let tx: &Transaction<'db> = &self.as_ref().transaction; + &*(tx as *const _) + }; + let subtree_path: Vec<&[u8]> = path_vec.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + let merk = db + .open_transactional_merk_at_path(path.into(), transaction_ref, None, grove_version) + .value + .map_err(|e| Error::CorruptedData(format!("failed to open merk by path-tx:{}", e)))?; + if merk.is_empty_tree().unwrap() { + return Ok(SubtreesMetadata::default()); + } + let mut subtree_keys = BTreeSet::new(); + + let mut raw_iter = Element::iterator(merk.storage.raw_iter()).unwrap(); + while let Some((key, value)) = raw_iter.next_element(grove_version).unwrap().unwrap() { + if value.is_any_tree() { + subtree_keys.insert(key.to_vec()); + } + } + + let mut subtrees_metadata = SubtreesMetadata::new(); + for subtree_key in &subtree_keys { + if let Ok(Some((elem_value, elem_value_hash))) = merk + .get_value_and_value_hash( + subtree_key.as_slice(), + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .value + { + let actual_value_hash = value_hash(&elem_value).unwrap(); + let mut new_path = path_vec.to_vec(); + new_path.push(subtree_key.to_vec()); + + let subtree_path: Vec<&[u8]> = new_path.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + let prefix = RocksDbStorage::build_prefix(path.as_ref().into()).unwrap(); + + subtrees_metadata.data.insert( + prefix, + (new_path.to_vec(), actual_value_hash, 
elem_value_hash), + ); + } + } + + Ok(subtrees_metadata) + } + + /// Prepares a synchronization session for the newly discovered subtrees and + /// returns the global chunk IDs of those subtrees. + /// + /// # Parameters + /// - `self`: A pinned mutable reference to the `MultiStateSyncSession`. + /// - `db`: A reference to the `GroveDb` instance used for managing the + /// synchronization process. + /// - `subtrees_metadata`: Metadata about the discovered subtrees that + /// require synchronization. + /// - `grove_version`: A reference to the `GroveVersion` specifying the + /// GroveDB version. + /// + /// # Returns + /// - `Ok(Vec>)`: A vector of global chunk IDs (each represented as + /// a vector of bytes) corresponding to the newly discovered subtrees. + /// These IDs can be fetched from sources to continue the synchronization + /// process. + /// - `Err(Error)`: An error if the synchronization session could not be + /// prepared or if processing the metadata fails. + /// + /// # Behavior + /// - Initializes the synchronization state for the newly discovered + /// subtrees based on the provided metadata. + /// - Calculates and returns the global chunk IDs of these subtrees, + /// enabling further state synchronization. + /// + /// # Notes + /// - Ensure that the `subtrees_metadata` accurately reflects the subtrees + /// requiring synchronization. + /// - This function modifies the state of the synchronization session to + /// include the new subtrees. + /// - Proper handling of the returned global chunk IDs is essential to + /// ensure seamless state synchronization. 
+ fn prepare_sync_state_sessions( + self: &mut Pin>>, + db: &'db GroveDb, + subtrees_metadata: SubtreesMetadata, + grove_version: &GroveVersion, + ) -> Result>, Error> { + let mut res = vec![]; + + for (prefix, prefix_metadata) in &subtrees_metadata.data { + if !self.processed_prefixes.contains(prefix) + && !self.current_prefixes.contains_key(prefix) + { + let (current_path, actual_value_hash, elem_value_hash) = &prefix_metadata; + + let subtree_path: Vec<&[u8]> = + current_path.iter().map(|vec| vec.as_slice()).collect(); + let path: &[&[u8]] = &subtree_path; + + let next_chunks_ids = self.add_subtree_sync_info( + db, + path.into(), + *elem_value_hash, + Some(*actual_value_hash), + *prefix, + grove_version, + )?; + + res.push(next_chunks_ids); + } + } + + Ok(res) + } +} + +/// Struct containing metadata about the current subtrees found in GroveDB. +/// This metadata is used during the state synchronization process to track +/// discovered subtrees and verify their integrity after they are constructed. +pub struct SubtreesMetadata { + /// A map where: + /// - **Key**: `SubtreePrefix` (the path digest of the subtree). + /// - **Value**: A tuple containing: + /// - `Vec>`: The actual path of the subtree in GroveDB. + /// - `CryptoHash`: The parent subtree's actual value hash. + /// - `CryptoHash`: The parent subtree's element value hash. + /// + /// The `parent subtree actual_value_hash` and `parent subtree + /// elem_value_hash` are required to verify the integrity of the newly + /// constructed subtree after synchronization. 
+ pub data: BTreeMap>, CryptoHash, CryptoHash)>, +} + +impl SubtreesMetadata { + pub fn new() -> SubtreesMetadata { + SubtreesMetadata { + data: BTreeMap::new(), + } + } +} + +impl Default for SubtreesMetadata { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Debug for SubtreesMetadata { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (prefix, metadata) in self.data.iter() { + let metadata_path = &metadata.0; + let metadata_path_str = path_to_string(metadata_path); + writeln!( + f, + " prefix:{:?} -> path:{:?}", + hex::encode(prefix), + metadata_path_str, + )?; + } + Ok(()) + } +} diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index 8a91d4f4..44510694 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -58,7 +58,7 @@ use crate::{ const BLAKE_BLOCK_LEN: usize = 64; -pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; +pub type SubtreePrefix = [u8; blake3::OUT_LEN]; fn blake_block_count(len: usize) -> usize { if len == 0 { @@ -472,6 +472,15 @@ impl<'db> Storage<'db> for RocksDbStorage { .map(|prefix| PrefixedRocksDbStorageContext::new(&self.db, prefix, batch)) } + fn get_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + batch: Option<&'db StorageBatch>, + ) -> CostContext { + PrefixedRocksDbStorageContext::new(&self.db, prefix, batch) + .wrap_with_cost(OperationCost::default()) + } + fn get_transactional_storage_context<'b, B>( &'db self, path: SubtreePath<'b, B>, @@ -486,6 +495,16 @@ impl<'db> Storage<'db> for RocksDbStorage { }) } + fn get_transactional_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + batch: Option<&'db StorageBatch>, + transaction: &'db Self::Transaction, + ) -> CostContext { + PrefixedRocksDbTransactionContext::new(&self.db, transaction, prefix, batch) + .wrap_with_cost(OperationCost::default()) + } + fn get_immediate_storage_context<'b, B>( &'db self, path: SubtreePath<'b, 
B>, @@ -499,6 +518,15 @@ impl<'db> Storage<'db> for RocksDbStorage { }) } + fn get_immediate_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + transaction: &'db Self::Transaction, + ) -> CostContext { + PrefixedRocksDbImmediateStorageContext::new(&self.db, transaction, prefix) + .wrap_with_cost(OperationCost::default()) + } + fn commit_multi_context_batch( &self, batch: StorageBatch, diff --git a/storage/src/storage.rs b/storage/src/storage.rs index 5ef26e06..2795cfc2 100644 --- a/storage/src/storage.rs +++ b/storage/src/storage.rs @@ -43,6 +43,8 @@ use grovedb_visualize::visualize_to_vec; use crate::{worst_case_costs::WorstKeyLength, Error}; +pub type SubtreePrefix = [u8; blake3::OUT_LEN]; + /// Top-level storage_cost abstraction. /// Should be able to hold storage_cost connection and to start transaction when /// needed. All query operations will be exposed using [StorageContext]. @@ -89,6 +91,14 @@ pub trait Storage<'db> { where B: AsRef<[u8]> + 'b; + /// Make storage context for a subtree with prefix, keeping all write + /// operations inside a `batch` if provided. + fn get_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + batch: Option<&'db StorageBatch>, + ) -> CostContext; + /// Make context for a subtree on transactional data, keeping all write /// operations inside a `batch` if provided. fn get_transactional_storage_context<'b, B>( @@ -100,6 +110,15 @@ pub trait Storage<'db> { where B: AsRef<[u8]> + 'b; + /// Make context for a subtree by prefix on transactional data, keeping all + /// write operations inside a `batch` if provided. + fn get_transactional_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + batch: Option<&'db StorageBatch>, + transaction: &'db Self::Transaction, + ) -> CostContext; + /// Make context for a subtree on transactional data that will apply all /// operations straight to the storage. 
fn get_immediate_storage_context<'b, B>( @@ -110,6 +129,14 @@ pub trait Storage<'db> { where B: AsRef<[u8]> + 'b; + /// Make context for a subtree by prefix on transactional data that will + /// apply all operations straight to the storage. + fn get_immediate_storage_context_by_subtree_prefix( + &'db self, + prefix: SubtreePrefix, + transaction: &'db Self::Transaction, + ) -> CostContext; + /// Creates a database checkpoint in a specified path fn create_checkpoint>(&self, path: P) -> Result<(), Error>; diff --git a/tutorials/Cargo.toml b/tutorials/Cargo.toml index 8084a248..7fd4ee81 100644 --- a/tutorials/Cargo.toml +++ b/tutorials/Cargo.toml @@ -14,6 +14,7 @@ grovedb-visualize = { path = "../visualize" } rand = "0.8.5" hex = "0.4" +blake3 = "1.5.1" [workspace] diff --git a/tutorials/src/bin/replication.rs b/tutorials/src/bin/replication.rs index ceeec2f2..74374c33 100644 --- a/tutorials/src/bin/replication.rs +++ b/tutorials/src/bin/replication.rs @@ -1,11 +1,11 @@ use std::collections::VecDeque; use std::path::Path; +use std::time::{Duration, Instant}; use grovedb::{operations::insert::InsertOptions, Element, GroveDb, PathQuery, Query, Transaction}; use grovedb::reference_path::ReferencePathType; use rand::{distributions::Alphanumeric, Rng, }; use grovedb::element::SumValue; use grovedb::replication::CURRENT_STATE_SYNC_VERSION; -use grovedb::replication::MultiStateSyncInfo; use grovedb_version::version::GroveVersion; const MAIN_ΚΕΥ: &[u8] = b"key_main"; @@ -18,6 +18,8 @@ const KEY_INT_REF_0: &[u8] = b"key_int_ref_0"; const KEY_INT_A: &[u8] = b"key_sum_0"; const ROOT_PATH: &[&[u8]] = &[]; +pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; + // Allow insertions to overwrite trees // This is necessary so the tutorial can be rerun easily const INSERT_OPTIONS: Option = Some(InsertOptions { @@ -37,14 +39,14 @@ fn populate_db(grovedb_path: String, grove_version: &GroveVersion) -> GroveDb { let tx = db.start_transaction(); let batch_size = 50; - for i in 0..=5 { + 
for i in 0..=100 { insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_0], i * batch_size, i * batch_size + batch_size - 1, &tx, &grove_version); } let _ = db.commit_transaction(tx); let tx = db.start_transaction(); let batch_size = 50; - for i in 0..=5 { + for i in 0..=100 { insert_range_values_db(&db, &[MAIN_ΚΕΥ, KEY_INT_1], i * batch_size, i * batch_size + batch_size - 1, &tx, &grove_version); } let _ = db.commit_transaction(tx); @@ -98,15 +100,8 @@ fn main() { let root_hash_destination = db_destination.root_hash(None, grove_version).unwrap().unwrap(); println!("root_hash_destination: {:?}", hex::encode(root_hash_destination)); - println!("\n######### source_subtree_metadata of db_source"); - let subtrees_metadata_source = db_source.get_subtrees_metadata(None, grove_version).unwrap(); - println!("{:?}", subtrees_metadata_source); - println!("\n######### db_checkpoint_0 -> db_destination state sync"); - let state_info = MultiStateSyncInfo::default(); - let tx = db_destination.start_transaction(); - sync_db_demo(&db_checkpoint_0, &db_destination, state_info, &tx, &grove_version).unwrap(); - db_destination.commit_transaction(tx).unwrap().expect("expected to commit transaction"); + sync_db_demo(&db_checkpoint_0, &db_destination, &grove_version).unwrap(); println!("\n######### verify db_destination"); let incorrect_hashes = db_destination.verify_grovedb(None, true, false, grove_version).unwrap(); @@ -246,24 +241,33 @@ fn query_db(db: &GroveDb, path: &[&[u8]], key: Vec, grove_version: &GroveVer fn sync_db_demo( source_db: &GroveDb, target_db: &GroveDb, - state_sync_info: MultiStateSyncInfo, - target_tx: &Transaction, grove_version: &GroveVersion, ) -> Result<(), grovedb::Error> { + let start_time = Instant::now(); let app_hash = source_db.root_hash(None, grove_version).value.unwrap(); - let mut state_sync_info = target_db.start_snapshot_syncing(state_sync_info, app_hash, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; + let mut session = 
target_db.start_snapshot_syncing(app_hash, CURRENT_STATE_SYNC_VERSION, grove_version)?; let mut chunk_queue : VecDeque> = VecDeque::new(); // The very first chunk to fetch is always identified by the root app_hash chunk_queue.push_back(app_hash.to_vec()); + let mut num_chunks = 0; while let Some(chunk_id) = chunk_queue.pop_front() { + num_chunks += 1; let ops = source_db.fetch_chunk(chunk_id.as_slice(), None, CURRENT_STATE_SYNC_VERSION, grove_version)?; - let (more_chunks, new_state_sync_info) = target_db.apply_chunk(state_sync_info, chunk_id.as_slice(), ops, target_tx, CURRENT_STATE_SYNC_VERSION, grove_version)?; - state_sync_info = new_state_sync_info; + + let more_chunks = session.apply_chunk(&target_db, chunk_id.as_slice(), ops, CURRENT_STATE_SYNC_VERSION, grove_version)?; chunk_queue.extend(more_chunks); } + println!("num_chunks: {}", num_chunks); + + if session.is_sync_completed() { + target_db.commit_session(session).expect("failed to commit session"); + } + let elapsed = start_time.elapsed(); + println!("state_synced in {:.2?}", elapsed); + Ok(()) } From c8108a2c00e188897d8c5343e865efb1160fb180 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 14 Jan 2025 15:45:04 +0700 Subject: [PATCH 2/8] fix: combine_with_higher_base_epoch_remove_bytes should use old owner id instead of merged one (#349) --- grovedb-epoch-based-storage-flags/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/grovedb-epoch-based-storage-flags/src/lib.rs b/grovedb-epoch-based-storage-flags/src/lib.rs index add8765b..206d5254 100644 --- a/grovedb-epoch-based-storage-flags/src/lib.rs +++ b/grovedb-epoch-based-storage-flags/src/lib.rs @@ -244,7 +244,8 @@ impl StorageFlags { "can not remove bytes when there is no epoch".to_string(), )); } - let identifier = owner_id.copied().unwrap_or_default(); + // we must use our owner id, because we would be removing bytes from it + let identifier = self.owner_id().copied().unwrap_or_default(); let sectioned_bytes = 
sectioned_bytes_by_identifier.get(&identifier).ok_or( StorageFlagsError::MergingStorageFlagsFromDifferentOwners( "can not remove bytes when there is no epoch".to_string(), From 6a7643e66f23c5073a85ec5b6cb90abf7bca384e Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 14 Jan 2025 16:33:40 +0700 Subject: [PATCH 3/8] update dependencies and small cleanup (#350) * reduce dependency tree * downgraded axum * downgraded axum * downgraded axum * a fix --- costs/Cargo.toml | 4 +- costs/src/storage_cost/removal.rs | 8 +- grovedb-epoch-based-storage-flags/Cargo.toml | 4 +- grovedb-epoch-based-storage-flags/src/lib.rs | 70 ++++++----- grovedb-version/Cargo.toml | 2 +- grovedb/Cargo.toml | 22 ++-- grovedb/benches/insertion_benchmark.rs | 26 ++--- grovedb/src/batch/batch_structure.rs | 26 ++--- .../estimated_costs/average_case_costs.rs | 24 ++-- grovedb/src/batch/estimated_costs/mod.rs | 12 +- .../batch/estimated_costs/worst_case_costs.rs | 24 ++-- grovedb/src/batch/just_in_time_cost_tests.rs | 6 +- grovedb/src/batch/key_info.rs | 24 ++-- grovedb/src/batch/mod.rs | 4 +- grovedb/src/batch/mode.rs | 2 +- grovedb/src/batch/multi_insert_cost_tests.rs | 2 +- grovedb/src/batch/options.rs | 10 +- .../src/batch/single_deletion_cost_tests.rs | 2 +- grovedb/src/batch/single_insert_cost_tests.rs | 2 +- .../single_sum_item_deletion_cost_tests.rs | 2 +- .../single_sum_item_insert_cost_tests.rs | 2 +- grovedb/src/element/constructor.rs | 36 +++--- grovedb/src/element/delete.rs | 22 ++-- grovedb/src/element/get.rs | 24 ++-- grovedb/src/element/helpers.rs | 70 +++++------ grovedb/src/element/insert.rs | 22 ++-- grovedb/src/element/mod.rs | 50 ++++---- grovedb/src/element/query.rs | 62 +++++----- grovedb/src/element/serialize.rs | 10 +- grovedb/src/error.rs | 4 +- grovedb/src/lib.rs | 82 ++++++------- grovedb/src/operations/auxiliary.rs | 8 +- grovedb/src/operations/delete/average_case.rs | 6 +- .../src/operations/delete/delete_up_tree.rs | 8 +- grovedb/src/operations/delete/mod.rs | 28 
++--- grovedb/src/operations/delete/worst_case.rs | 4 +- grovedb/src/operations/get/average_case.rs | 8 +- grovedb/src/operations/get/mod.rs | 16 +-- grovedb/src/operations/get/query.rs | 14 +-- grovedb/src/operations/get/worst_case.rs | 8 +- grovedb/src/operations/insert/mod.rs | 20 ++-- grovedb/src/operations/is_empty_tree.rs | 8 +- grovedb/src/operations/mod.rs | 14 +-- grovedb/src/operations/proof/generate.rs | 5 +- grovedb/src/operations/proof/mod.rs | 5 +- grovedb/src/operations/proof/util.rs | 16 +-- grovedb/src/query/mod.rs | 30 ++--- grovedb/src/query_result_type.rs | 16 +-- grovedb/src/reference_path.rs | 24 ++-- grovedb/src/replication.rs | 2 +- merk/Cargo.toml | 33 ++---- merk/src/error.rs | 10 +- .../src/estimated_costs/average_case_costs.rs | 58 +++++----- merk/src/estimated_costs/mod.rs | 30 ++--- merk/src/estimated_costs/worst_case_costs.rs | 36 +++--- merk/src/lib.rs | 30 ++--- merk/src/merk/defaults.rs | 6 +- merk/src/merk/options.rs | 4 +- merk/src/proofs/chunk.rs | 4 +- merk/src/proofs/encoding.rs | 32 ++--- merk/src/proofs/mod.rs | 24 ++-- merk/src/proofs/query/common_path.rs | 2 +- merk/src/proofs/query/insert.rs | 2 +- merk/src/proofs/query/map.rs | 30 ++--- merk/src/proofs/query/merge.rs | 2 +- merk/src/proofs/query/mod.rs | 84 +++++++------- merk/src/proofs/query/query_item/merge.rs | 2 +- merk/src/proofs/query/query_item/mod.rs | 70 +++++------ merk/src/proofs/query/verify.rs | 24 ++-- merk/src/proofs/tree.rs | 56 ++++----- merk/src/test_utils/temp_merk.rs | 18 +-- merk/src/tree/commit.rs | 10 +- merk/src/tree/debug.rs | 109 +++++++++++++----- merk/src/tree/encoding.rs | 16 +-- merk/src/tree/fuzz_tests.rs | 24 ++-- merk/src/tree/hash.rs | 30 ++--- merk/src/tree/iter.rs | 14 +-- merk/src/tree/kv.rs | 26 ++--- merk/src/tree/link.rs | 26 ++--- merk/src/tree/mod.rs | 72 ++++++------ merk/src/tree/ops.rs | 24 ++-- merk/src/tree/tree_feature_type.rs | 18 +-- merk/src/tree/walk/fetch.rs | 10 +- merk/src/tree/walk/mod.rs | 22 ++-- 
merk/src/tree/walk/ref_walker.rs | 10 +- storage/Cargo.toml | 4 +- visualize/Cargo.toml | 2 +- 87 files changed, 954 insertions(+), 920 deletions(-) diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 44a36342..aebaab67 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -10,6 +10,6 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] -thiserror = "1.0.59" -intmap = "2.0.0" +thiserror = "2.0.11" +intmap = "3.0.1" integer-encoding = "4.0.0" diff --git a/costs/src/storage_cost/removal.rs b/costs/src/storage_cost/removal.rs index 9fa7af99..93c4d396 100644 --- a/costs/src/storage_cost/removal.rs +++ b/costs/src/storage_cost/removal.rs @@ -43,10 +43,10 @@ use crate::storage_cost::removal::StorageRemovedBytes::{ pub type Identifier = [u8; 32]; /// Unknown Epoch -pub const UNKNOWN_EPOCH: u64 = u64::MAX; +pub const UNKNOWN_EPOCH: u16 = u16::MAX; /// A BTreeMap mapping identities to the storage they removed by epoch -pub type StorageRemovalPerEpochByIdentifier = BTreeMap>; +pub type StorageRemovalPerEpochByIdentifier = BTreeMap>; /// Removal bytes #[derive(Debug, PartialEq, Clone, Eq, Default)] @@ -122,7 +122,7 @@ impl Add for StorageRemovedBytes { }; (k, combined) }) - .collect::>(); + .collect::>(); intersection.into_iter().chain(int_map_b).collect() } else { int_map_b @@ -193,7 +193,7 @@ impl AddAssign for StorageRemovedBytes { }; (k, combined) }) - .collect::>(); + .collect::>(); intersection.into_iter().chain(int_map_b).collect() } else { int_map_b diff --git a/grovedb-epoch-based-storage-flags/Cargo.toml b/grovedb-epoch-based-storage-flags/Cargo.toml index b2832ed8..5e3cde71 100644 --- a/grovedb-epoch-based-storage-flags/Cargo.toml +++ b/grovedb-epoch-based-storage-flags/Cargo.toml @@ -12,5 +12,5 @@ grovedb-costs = { version = "2.1.0", path = "../costs" } hex = { version = "0.4.3" } integer-encoding = { version = "4.0.0" } -intmap = { version = "2.0.0", features = ["serde"]} -thiserror = { version = "1.0.63" } +intmap = { version = "3.0.1", 
features = ["serde"]} +thiserror = { version = "2.0.11" } diff --git a/grovedb-epoch-based-storage-flags/src/lib.rs b/grovedb-epoch-based-storage-flags/src/lib.rs index 206d5254..42551790 100644 --- a/grovedb-epoch-based-storage-flags/src/lib.rs +++ b/grovedb-epoch-based-storage-flags/src/lib.rs @@ -256,13 +256,13 @@ impl StorageFlags { sectioned_bytes .iter() .try_for_each(|(epoch, removed_bytes)| { - if *epoch == base_epoch as u64 { + if epoch == base_epoch { return Ok::<(), StorageFlagsError>(()); } - let bytes_added_in_epoch = other_epoch_bytes.get_mut(&(*epoch as u16)).ok_or( + let bytes_added_in_epoch = other_epoch_bytes.get_mut(&epoch).ok_or( StorageFlagsError::RemovingAtEpochWithNoAssociatedStorage(format!( "can not remove bytes when there is no epoch number [{}]", - *epoch + epoch )), )?; @@ -274,7 +274,7 @@ impl StorageFlags { if desired_bytes_in_epoch <= MINIMUM_NON_BASE_FLAGS_SIZE { // Collect the key to remove later - keys_to_remove.push(*epoch as u16); + keys_to_remove.push(epoch); } else { *bytes_added_in_epoch = desired_bytes_in_epoch; } @@ -736,10 +736,10 @@ impl StorageFlags { return NoStorageRemoval; } let bytes_left = removed_bytes; - let mut sectioned_storage_removal: IntMap = IntMap::default(); + let mut sectioned_storage_removal: IntMap = IntMap::default(); if bytes_left > 0 { // We need to take some from the base epoch - sectioned_storage_removal.insert(*base_epoch as u64, removed_bytes); + sectioned_storage_removal.insert(*base_epoch, removed_bytes); } let mut sectioned_storage_removal_by_identifier: StorageRemovalPerEpochByIdentifier = BTreeMap::new(); @@ -764,19 +764,17 @@ impl StorageFlags { } let mut bytes_left = removed_bytes; let mut rev_iter = other_epoch_bytes.iter().rev(); - let mut sectioned_storage_removal: IntMap = IntMap::default(); + let mut sectioned_storage_removal: IntMap = IntMap::default(); while bytes_left > 0 { if let Some((epoch_index, bytes_in_epoch)) = rev_iter.next() { if *bytes_in_epoch <= bytes_left + 
MINIMUM_NON_BASE_FLAGS_SIZE { - sectioned_storage_removal.insert( - *epoch_index as u64, - *bytes_in_epoch - MINIMUM_NON_BASE_FLAGS_SIZE, - ); + sectioned_storage_removal + .insert(*epoch_index, *bytes_in_epoch - MINIMUM_NON_BASE_FLAGS_SIZE); bytes_left -= *bytes_in_epoch - MINIMUM_NON_BASE_FLAGS_SIZE; } else { // Correctly take only the required bytes_left from this epoch - sectioned_storage_removal.insert(*epoch_index as u64, bytes_left); + sectioned_storage_removal.insert(*epoch_index, bytes_left); bytes_left = 0; // All required bytes have been removed, stop processing break; // Exit the loop as there's no need to process // further epochs @@ -788,7 +786,7 @@ impl StorageFlags { if bytes_left > 0 { // If there are still bytes left, take them from the base epoch - sectioned_storage_removal.insert(*base_epoch as u64, bytes_left); + sectioned_storage_removal.insert(*base_epoch, bytes_left); } let mut sectioned_storage_removal_by_identifier: StorageRemovalPerEpochByIdentifier = @@ -1185,7 +1183,7 @@ mod storage_flags_tests { fn single_epoch_removed_bytes_map( owner_id: [u8; 32], - epoch_index: u64, + epoch_index: u16, bytes_removed: u32, ) -> StorageRemovalPerEpochByIdentifier { let mut removed_bytes = StorageRemovalPerEpochByIdentifier::default(); @@ -1197,7 +1195,7 @@ mod storage_flags_tests { fn multi_epoch_removed_bytes_map( owner_id: [u8; 32], - removed_bytes_per_epoch: IntMap, + removed_bytes_per_epoch: IntMap, ) -> StorageRemovalPerEpochByIdentifier { let mut removed_bytes = StorageRemovalPerEpochByIdentifier::default(); removed_bytes.insert(owner_id, removed_bytes_per_epoch); @@ -1242,9 +1240,9 @@ mod storage_flags_tests { let mut removed_bytes = IntMap::new(); for i in 1..200 { other_epochs.insert(i, MINIMUM_NON_BASE_FLAGS_SIZE + 1); - removed_bytes.insert(i as u64, 1); // anything between 1 and - // MINIMUM_NON_BASE_FLAGS_SIZE + - // 1 would be the same + removed_bytes.insert(i, 1); // anything between 1 and + // MINIMUM_NON_BASE_FLAGS_SIZE + + // 1 would 
be the same } let left_flag = StorageFlags::MultiEpochOwned(left_base_index, other_epochs, owner_id); @@ -1281,7 +1279,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(default_owner_id(), IntMap::from_iter([(5u64, 100)])); + map.insert(default_owner_id(), IntMap::from_iter([(5u16, 100)])); map }) ); @@ -1289,7 +1287,7 @@ mod storage_flags_tests { value_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(default_owner_id(), IntMap::from_iter([(5u64, 200)])); + map.insert(default_owner_id(), IntMap::from_iter([(5u16, 200)])); map }) ); @@ -1306,7 +1304,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 50)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 50)])); map }) ); @@ -1314,7 +1312,7 @@ mod storage_flags_tests { value_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 150)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 150)])); map }) ); @@ -1336,7 +1334,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( default_owner_id(), - IntMap::from_iter([(7u64, 197), (6u64, 53)]), + IntMap::from_iter([(7u16, 197), (6u16, 53)]), ); map }) @@ -1357,7 +1355,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(default_owner_id(), IntMap::from_iter([(5u64, 250)])); + map.insert(default_owner_id(), IntMap::from_iter([(5u16, 250)])); map }) ); @@ -1367,7 +1365,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( default_owner_id(), - IntMap::from_iter([(7u64, 47), (6u64, 97), (5u64, 106)]), + IntMap::from_iter([(7u16, 47), (6u16, 97), (5u16, 106)]), ); map }) @@ -1388,7 +1386,7 @@ mod storage_flags_tests { 
key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 250)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 250)])); map }) ); @@ -1398,7 +1396,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( owner_id, - IntMap::from_iter([(7u64, 47), (6u64, 97), (5u64, 106)]), + IntMap::from_iter([(7u16, 47), (6u16, 97), (5u16, 106)]), ); map }) @@ -1427,7 +1425,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 100)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 100)])); map }) ); @@ -1435,7 +1433,7 @@ mod storage_flags_tests { value_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 50)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 50)])); map }) ); @@ -1456,7 +1454,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(default_owner_id(), IntMap::from_iter([(5u64, 400)])); + map.insert(default_owner_id(), IntMap::from_iter([(5u16, 400)])); map }) ); @@ -1466,7 +1464,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( default_owner_id(), - IntMap::from_iter([(7u64, 197), (6u64, 97), (5u64, 6)]), + IntMap::from_iter([(7u16, 197), (6u16, 97), (5u16, 6)]), ); map }) @@ -1487,7 +1485,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(owner_id, IntMap::from_iter([(5u64, 450)])); + map.insert(owner_id, IntMap::from_iter([(5u16, 450)])); map }) ); @@ -1497,7 +1495,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( owner_id, - IntMap::from_iter([(7u64, 197), (6u64, 97), (5u64, 56)]), + IntMap::from_iter([(7u16, 197), (6u16, 97), (5u16, 56)]), ); map }) 
@@ -1551,7 +1549,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( owner_id, - IntMap::from_iter([(5u64, 6), (6u64, 297), (7u64, 397)]), + IntMap::from_iter([(5u16, 6), (6u16, 297), (7u16, 397)]), ); map }) @@ -1570,7 +1568,7 @@ mod storage_flags_tests { key_removal, StorageRemovedBytes::SectionedStorageRemoval({ let mut map = BTreeMap::new(); - map.insert(default_owner_id(), IntMap::from_iter([(5u64, 400)])); + map.insert(default_owner_id(), IntMap::from_iter([(5u16, 400)])); map }) ); @@ -1580,7 +1578,7 @@ mod storage_flags_tests { let mut map = BTreeMap::new(); map.insert( default_owner_id(), - IntMap::from_iter([(7u64, 97), (6u64, 297), (5u64, 106)]), + IntMap::from_iter([(7u16, 97), (6u16, 297), (5u16, 106)]), ); map }) diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml index 78716416..a46cb758 100644 --- a/grovedb-version/Cargo.toml +++ b/grovedb-version/Cargo.toml @@ -8,7 +8,7 @@ license = "MIT" repository = "https://github.com/dashpay/grovedb" [dependencies] -thiserror = "1.0.59" +thiserror = "2.0.11" versioned-feature-core = "1.0.0" [features] diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index f84735c0..de616e55 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -19,23 +19,20 @@ grovedb-storage = { version = "2.1.0", path = "../storage", optional = true } grovedb-version = { version = "2.1.0", path = "../grovedb-version" } grovedb-visualize = { version = "2.1.0", path = "../visualize", optional = true } -axum = { version = "0.7.5", features = ["macros"], optional = true } +axum = { version = "=0.7.5", features = ["macros"], optional = true } bincode = { version = "2.0.0-rc.3" } -bitvec = "1" -blake3 = "1.4.0" -derive_more = "0.99.18" +blake3 = "1.5.5" hex = "0.4.3" -indexmap = "2.2.6" +indexmap = "2.7.0" integer-encoding = { version = "4.0.0", optional = true } -intmap = { version = "2.0.0", optional = true } -itertools = { version = "0.12.1", optional = true } -nohash-hasher = { version = "0.2.0", 
optional = true } +intmap = { version = "3.0.1", optional = true } +itertools = { version = "0.14.0", optional = true } tempfile = { version = "3.10.1", optional = true } -thiserror = { version = "1.0.59", optional = true } +thiserror = { version = "2.0.11", optional = true } tokio-util = { version = "0.7.12", optional = true } tokio = { version = "1.40.0", features = ["rt-multi-thread", "net"], optional = true } tower-http = { version = "0.5.2", features = ["fs"], optional = true } -zip-extensions = { version ="0.6.2", optional = true } +zip-extensions = { version = "0.8.1", optional = true } serde = { version = "1.0.210", features = ["derive"], optional = true } [dev-dependencies] @@ -56,6 +53,10 @@ proof_debug = ["grovedb-merk/proof_debug"] serde = ["dep:serde", "grovedb-merk/serde", "indexmap/serde"] full = [ "grovedb-merk/full", + "minimal", +] +minimal = [ + "grovedb-merk/minimal", "thiserror", "tempfile", "grovedb-storage/rocksdb_storage", @@ -63,7 +64,6 @@ full = [ "itertools", "integer-encoding", "grovedb-costs", - "nohash-hasher", "intmap", ] visualize = [ diff --git a/grovedb/benches/insertion_benchmark.rs b/grovedb/benches/insertion_benchmark.rs index 051a32d1..6e737015 100644 --- a/grovedb/benches/insertion_benchmark.rs +++ b/grovedb/benches/insertion_benchmark.rs @@ -28,24 +28,24 @@ //! 
Insertion Benchmark -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use criterion::{criterion_group, criterion_main, Criterion}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb::{Element, GroveDb}; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use rand::Rng; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use tempfile::TempDir; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] const N_ITEMS: usize = 10_000; const EMPTY_PATH: SubtreePath<'static, [u8; 0]> = SubtreePath::empty(); /// Benchmark function to insert '''N_ITEMS''' key-values into an empty tree /// without a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); @@ -82,7 +82,7 @@ pub fn insertion_benchmark_without_transaction(c: &mut Criterion) { /// Benchmark function to insert '''N_ITEMS''' key-values into an empty tree /// with a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); @@ -120,7 +120,7 @@ pub fn insertion_benchmark_with_transaction(c: &mut Criterion) { } /// Benchmark function to insert 10 root leaves without a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn root_leaf_insertion_benchmark_without_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); @@ -145,7 +145,7 @@ pub fn root_leaf_insertion_benchmark_without_transaction(c: &mut Criterion) { } /// Benchmark function to insert 10 root leaves with a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn root_leaf_insertion_benchmark_with_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); 
@@ -173,7 +173,7 @@ pub fn root_leaf_insertion_benchmark_with_transaction(c: &mut Criterion) { /// Benchmark function to insert a subtree nested within 10 higher subtrees /// and insert key-values into it without a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); @@ -214,7 +214,7 @@ pub fn deeply_nested_insertion_benchmark_without_transaction(c: &mut Criterion) /// Benchmark function to insert a subtree nested within 10 higher subtrees /// and insert key-values into it with a transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { let dir = TempDir::new().unwrap(); let db = GroveDb::open(dir.path()).unwrap(); @@ -255,7 +255,7 @@ pub fn deeply_nested_insertion_benchmark_with_transaction(c: &mut Criterion) { }); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] criterion_group!( benches, insertion_benchmark_without_transaction, @@ -265,5 +265,5 @@ criterion_group!( deeply_nested_insertion_benchmark_without_transaction, deeply_nested_insertion_benchmark_with_transaction, ); -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] criterion_main!(benches); diff --git a/grovedb/src/batch/batch_structure.rs b/grovedb/src/batch/batch_structure.rs index 9cfe03db..03694379 100644 --- a/grovedb/src/batch/batch_structure.rs +++ b/grovedb/src/batch/batch_structure.rs @@ -1,33 +1,33 @@ //! 
Batch structure -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{collections::BTreeMap, fmt}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, storage_cost::{removal::StorageRemovedBytes, StorageCost}, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_visualize::{DebugByteVectors, DebugBytes}; -#[cfg(feature = "full")] -use nohash_hasher::IntMap; +#[cfg(feature = "minimal")] +use intmap::IntMap; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{key_info::KeyInfo, GroveOp, KeyInfoPath, QualifiedGroveDbOp, TreeCache}, Element, ElementFlags, Error, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub type OpsByPath = BTreeMap>; /// Level, path, key, op -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub type OpsByLevelPath = IntMap; /// Batch structure -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub(super) struct BatchStructure { /// Operations by level path pub(super) ops_by_level_paths: OpsByLevelPath, @@ -45,7 +45,7 @@ pub(super) struct BatchStructure { pub(super) last_level: u32, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl fmt::Debug for BatchStructure { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut fmt_int_map = IntMap::default(); @@ -60,7 +60,7 @@ impl fmt::Debug for BatchStructure { } fmt_path_map.insert(DebugByteVectors(path.to_path()), fmt_key_map); } - fmt_int_map.insert(*level, fmt_path_map); + fmt_int_map.insert(level, fmt_path_map); } f.debug_struct("BatchStructure") @@ -71,7 +71,7 @@ impl fmt::Debug for BatchStructure { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl BatchStructure where C: TreeCache, @@ -146,7 +146,7 @@ where } let level = op.path.len(); - if let Some(ops_on_level) = ops_by_level_paths.get_mut(&level) { + if let Some(ops_on_level) = ops_by_level_paths.get_mut(level) { if let Some(ops_on_path) = 
ops_on_level.get_mut(&op.path) { ops_on_path.insert(op.key, op.op); } else { diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index ef64b0f4..8d4c076a 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -1,29 +1,29 @@ //! Average case costs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{ collections::{BTreeMap, HashMap}, fmt, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; use grovedb_merk::RootHashKeyAndSum; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{ estimated_costs::average_case_costs::{average_case_merk_propagate, EstimatedLayerInformation}, IsSumTree, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use itertools::Itertools; use crate::Element; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{ key_info::KeyInfo, mode::BatchRunMode, BatchApplyOptions, GroveOp, KeyInfoPath, @@ -32,7 +32,7 @@ use crate::{ Error, GroveDb, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveOp { /// Get the estimated average case cost of the op. Calls a lower level /// function to calculate the estimate based on the type of op. Returns @@ -137,7 +137,7 @@ impl GroveOp { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Cache for subtree paths for average case scenario costs. 
#[derive(Default)] pub(in crate::batch) struct AverageCaseTreeCacheKnownPaths { @@ -145,7 +145,7 @@ pub(in crate::batch) struct AverageCaseTreeCacheKnownPaths { cached_merks: HashMap, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl AverageCaseTreeCacheKnownPaths { /// Updates the cache to the default setting with the given subtree paths pub(in crate::batch) fn new_with_estimated_layer_information( @@ -158,14 +158,14 @@ impl AverageCaseTreeCacheKnownPaths { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl fmt::Debug for AverageCaseTreeCacheKnownPaths { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TreeCacheKnownPaths").finish() } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeCache for AverageCaseTreeCacheKnownPaths { fn insert(&mut self, op: &QualifiedGroveDbOp, is_sum_tree: bool) -> CostResult<(), Error> { let mut average_case_cost = OperationCost::default(); @@ -291,7 +291,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/grovedb/src/batch/estimated_costs/mod.rs b/grovedb/src/batch/estimated_costs/mod.rs index 54fc109c..37b909da 100644 --- a/grovedb/src/batch/estimated_costs/mod.rs +++ b/grovedb/src/batch/estimated_costs/mod.rs @@ -1,23 +1,23 @@ //! 
Estimated costs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::collections::HashMap; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::estimated_costs::{ average_case_costs::EstimatedLayerInformation, worst_case_costs::WorstCaseLayerInformation, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::batch::KeyInfoPath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod average_case_costs; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod worst_case_costs; /// Estimated costs types -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub enum EstimatedCostsType { /// Average cast estimated costs type AverageCaseCostsType(HashMap), diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index 9bf9a808..5e4f6e42 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -1,28 +1,28 @@ //! 
Worst case costs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::estimated_costs::worst_case_costs::{ worst_case_merk_propagate, WorstCaseLayerInformation, }; use grovedb_merk::RootHashKeyAndSum; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use itertools::Itertools; use crate::Element; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{ key_info::KeyInfo, mode::BatchRunMode, BatchApplyOptions, GroveOp, KeyInfoPath, @@ -31,7 +31,7 @@ use crate::{ Error, GroveDb, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveOp { fn worst_case_cost( &self, @@ -133,7 +133,7 @@ impl GroveOp { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Cache for subtree paths for worst case scenario costs. 
#[derive(Default)] pub(in crate::batch) struct WorstCaseTreeCacheKnownPaths { @@ -141,7 +141,7 @@ pub(in crate::batch) struct WorstCaseTreeCacheKnownPaths { cached_merks: HashSet, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl WorstCaseTreeCacheKnownPaths { /// Updates the cache with the default settings and the given paths pub(in crate::batch) fn new_with_worst_case_layer_information( @@ -154,14 +154,14 @@ impl WorstCaseTreeCacheKnownPaths { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl fmt::Debug for WorstCaseTreeCacheKnownPaths { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TreeCacheKnownPaths").finish() } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeCache for WorstCaseTreeCacheKnownPaths { fn insert(&mut self, op: &QualifiedGroveDbOp, _is_sum_tree: bool) -> CostResult<(), Error> { let mut worst_case_cost = OperationCost::default(); @@ -262,7 +262,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/grovedb/src/batch/just_in_time_cost_tests.rs b/grovedb/src/batch/just_in_time_cost_tests.rs index 9c45680b..3f083763 100644 --- a/grovedb/src/batch/just_in_time_cost_tests.rs +++ b/grovedb/src/batch/just_in_time_cost_tests.rs @@ -1,7 +1,7 @@ //! This tests just in time costs //! 
Just in time costs modify the tree in the same batch -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use std::{collections::BTreeMap, option::Option::None}; @@ -24,7 +24,7 @@ mod tests { fn single_epoch_removed_bytes_map( owner_id: [u8; 32], - epoch_index: u64, + epoch_index: u16, bytes_removed: u32, ) -> StorageRemovalPerEpochByIdentifier { let mut removed_bytes = StorageRemovalPerEpochByIdentifier::default(); @@ -346,7 +346,7 @@ mod tests { let ops_by_root_path = left_over_ops .as_ref() .unwrap() - .get(&0) + .get(0) .expect("expected to have root path"); assert_eq!(ops_by_root_path.len(), 1); let new_ops = vec![QualifiedGroveDbOp::insert_or_replace_op( diff --git a/grovedb/src/batch/key_info.rs b/grovedb/src/batch/key_info.rs index e7dd25b5..9c61766f 100644 --- a/grovedb/src/batch/key_info.rs +++ b/grovedb/src/batch/key_info.rs @@ -1,21 +1,21 @@ //! Key info -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{ cmp::Ordering, hash::{Hash, Hasher}, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::worst_case_costs::WorstKeyLength; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_visualize::{Drawer, Visualize}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::batch::key_info::KeyInfo::{KnownKey, MaxKeySize}; /// Key info -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Eq, Debug)] pub enum KeyInfo { /// Known key @@ -29,7 +29,7 @@ pub enum KeyInfo { }, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl PartialEq for KeyInfo { fn eq(&self, other: &Self) -> bool { match (self, other) { @@ -69,14 +69,14 @@ impl PartialEq<&[u8]> for KeyInfo { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl PartialOrd for KeyInfo { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Ord for KeyInfo { fn cmp(&self, other: &Self) -> Ordering { match 
self.as_slice().cmp(other.as_slice()) { @@ -90,7 +90,7 @@ impl Ord for KeyInfo { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Hash for KeyInfo { fn hash(&self, state: &mut H) { match self { @@ -106,7 +106,7 @@ impl Hash for KeyInfo { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl WorstKeyLength for KeyInfo { fn max_length(&self) -> u8 { match self { @@ -116,7 +116,7 @@ impl WorstKeyLength for KeyInfo { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl KeyInfo { /// Return self as slice pub fn as_slice(&self) -> &[u8] { @@ -143,7 +143,7 @@ impl KeyInfo { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Visualize for KeyInfo { fn visualize(&self, mut drawer: Drawer) -> std::io::Result> { match self { diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 2767ab2b..7f73fea0 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -1741,7 +1741,7 @@ impl GroveDb { let stop_level = batch_apply_options.batch_pause_height.unwrap_or_default() as u32; // We will update up the tree - while let Some(ops_at_level) = ops_by_level_paths.remove(¤t_level) { + while let Some(ops_at_level) = ops_by_level_paths.remove(current_level) { for (path, ops_at_path) in ops_at_level.into_iter() { if current_level == 0 { // execute the ops at this path @@ -1794,7 +1794,7 @@ impl GroveDb { // operations up for the level above if let Some((key, parent_path)) = path.split_last() { if let Some(ops_at_level_above) = - ops_by_level_paths.get_mut(&(current_level - 1)) + ops_by_level_paths.get_mut(current_level - 1) { // todo: fix this hack let parent_path = KeyInfoPath(parent_path.to_vec()); diff --git a/grovedb/src/batch/mode.rs b/grovedb/src/batch/mode.rs index 897d15f2..2cc2e307 100644 --- a/grovedb/src/batch/mode.rs +++ b/grovedb/src/batch/mode.rs @@ -11,7 +11,7 @@ use grovedb_merk::estimated_costs::{ #[cfg(feature = "estimated_costs")] use crate::batch::KeyInfoPath; -#[cfg(feature = "full")] 
+#[cfg(feature = "minimal")] /// Batch Running Mode #[derive(Clone, PartialEq, Eq)] pub enum BatchRunMode { diff --git a/grovedb/src/batch/multi_insert_cost_tests.rs b/grovedb/src/batch/multi_insert_cost_tests.rs index 666de224..370aac91 100644 --- a/grovedb/src/batch/multi_insert_cost_tests.rs +++ b/grovedb/src/batch/multi_insert_cost_tests.rs @@ -1,6 +1,6 @@ //! Multi insert cost tests -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use std::{ops::Add, option::Option::None}; diff --git a/grovedb/src/batch/options.rs b/grovedb/src/batch/options.rs index 1f60aeb4..b21a2067 100644 --- a/grovedb/src/batch/options.rs +++ b/grovedb/src/batch/options.rs @@ -1,13 +1,13 @@ //! Options -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::MerkOptions; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::operations::{delete::DeleteOptions, insert::InsertOptions}; /// Batch apply options -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Debug, Clone)] pub struct BatchApplyOptions { /// Validate insertion does not override @@ -27,7 +27,7 @@ pub struct BatchApplyOptions { pub batch_pause_height: Option, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for BatchApplyOptions { fn default() -> Self { BatchApplyOptions { @@ -42,7 +42,7 @@ impl Default for BatchApplyOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl BatchApplyOptions { /// As insert options pub(crate) fn as_insert_options(&self) -> InsertOptions { diff --git a/grovedb/src/batch/single_deletion_cost_tests.rs b/grovedb/src/batch/single_deletion_cost_tests.rs index e2b3d9a9..c84a2e47 100644 --- a/grovedb/src/batch/single_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_deletion_cost_tests.rs @@ -1,6 +1,6 @@ //! 
Tests -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use grovedb_costs::storage_cost::removal::{ diff --git a/grovedb/src/batch/single_insert_cost_tests.rs b/grovedb/src/batch/single_insert_cost_tests.rs index d86a2bf2..b3750eca 100644 --- a/grovedb/src/batch/single_insert_cost_tests.rs +++ b/grovedb/src/batch/single_insert_cost_tests.rs @@ -1,6 +1,6 @@ //! Tests -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use grovedb_costs::{ storage_cost::{ diff --git a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs index c0be9a08..7db03b7f 100644 --- a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs @@ -1,6 +1,6 @@ //! Tests -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use grovedb_version::version::GroveVersion; diff --git a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs index d58e7327..ae16eaa2 100644 --- a/grovedb/src/batch/single_sum_item_insert_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_insert_cost_tests.rs @@ -1,6 +1,6 @@ //! Tests -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod tests { use grovedb_costs::{ storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, diff --git a/grovedb/src/element/constructor.rs b/grovedb/src/element/constructor.rs index 91143ec8..1d5049cd 100644 --- a/grovedb/src/element/constructor.rs +++ b/grovedb/src/element/constructor.rs @@ -1,7 +1,7 @@ //! Constructor //! 
Functions for setting an element's type -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ element::{MaxReferenceHop, SumValue}, reference_path::ReferencePathType, @@ -9,62 +9,62 @@ use crate::{ }; impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to default empty tree without flags // TODO: improve API to avoid creation of Tree elements with uncertain state pub fn empty_tree() -> Self { Element::new_tree(Default::default()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to default empty tree with flags pub fn empty_tree_with_flags(flags: Option) -> Self { Element::new_tree_with_flags(Default::default(), flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to default empty sum tree without flags pub fn empty_sum_tree() -> Self { Element::new_sum_tree(Default::default()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to default empty sum tree with flags pub fn empty_sum_tree_with_flags(flags: Option) -> Self { Element::new_sum_tree_with_flags(Default::default(), flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to an item without flags pub fn new_item(item_value: Vec) -> Self { Element::Item(item_value, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to an item with flags pub fn new_item_with_flags(item_value: Vec, flags: Option) -> Self { Element::Item(item_value, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a sum item without flags pub fn new_sum_item(value: i64) -> Self { Element::SumItem(value, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a sum item with flags pub fn new_sum_item_with_flags(value: i64, flags: Option) -> Self { Element::SumItem(value, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a reference without flags pub fn 
new_reference(reference_path: ReferencePathType) -> Self { Element::Reference(reference_path, None, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a reference with flags pub fn new_reference_with_flags( reference_path: ReferencePathType, @@ -73,7 +73,7 @@ impl Element { Element::Reference(reference_path, None, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a reference with hops, no flags pub fn new_reference_with_hops( reference_path: ReferencePathType, @@ -82,7 +82,7 @@ impl Element { Element::Reference(reference_path, max_reference_hop, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a reference with max hops and flags pub fn new_reference_with_max_hops_and_flags( reference_path: ReferencePathType, @@ -92,13 +92,13 @@ impl Element { Element::Reference(reference_path, max_reference_hop, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a tree without flags pub fn new_tree(maybe_root_key: Option>) -> Self { Element::Tree(maybe_root_key, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a tree with flags pub fn new_tree_with_flags( maybe_root_key: Option>, @@ -107,13 +107,13 @@ impl Element { Element::Tree(maybe_root_key, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a sum tree without flags pub fn new_sum_tree(maybe_root_key: Option>) -> Self { Element::SumTree(maybe_root_key, 0, None) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a sum tree with flags pub fn new_sum_tree_with_flags( maybe_root_key: Option>, @@ -122,7 +122,7 @@ impl Element { Element::SumTree(maybe_root_key, 0, flags) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Set element to a sum tree with flags and sum value pub fn new_sum_tree_with_flags_and_sum_value( maybe_root_key: Option>, diff --git a/grovedb/src/element/delete.rs 
b/grovedb/src/element/delete.rs index ced24e27..8c9b3511 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -1,26 +1,26 @@ //! Delete //! Implements functions in Element for deleting -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::OperationCost; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::check_grovedb_v0_with_cost; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::error::GroveVersionError; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{Element, Error}; impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Delete an element from Merk under a key pub fn delete<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( merk: &mut Merk, @@ -53,7 +53,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Delete an element from Merk under a key pub fn delete_with_sectioned_removal_bytes<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( merk: &mut Merk, @@ -103,7 +103,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Delete an element from Merk under a key to batch operations pub fn delete_into_batch_operations>( key: K, diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index ded859d3..3c66b049 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -1,16 +1,16 @@ //! Get //! 
Implements functions in Element for getting -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; use grovedb_merk::tree::kv::KV; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::Merk; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{ed::Decode, tree::TreeNodeInner}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, @@ -18,11 +18,11 @@ use grovedb_version::{ use integer_encoding::VarInt; use crate::element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{Element, Error, Hash}; impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get an element from Merk under a key; path should be resolved and proper /// Merk should be loaded by this moment pub fn get<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( @@ -47,7 +47,7 @@ impl Element { }) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get an element from Merk under a key; path should be resolved and proper /// Merk should be loaded by this moment pub fn get_optional<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( @@ -86,7 +86,7 @@ impl Element { Ok(element).wrap_with_cost(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get an element directly from storage under a key /// Merk does not need to be loaded /// Errors if element doesn't exist @@ -110,7 +110,7 @@ impl Element { }) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get an element directly from storage under a key /// Merk does not need to be loaded pub fn get_optional_from_storage<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( @@ -198,7 +198,7 @@ impl Element { Ok(element).wrap_with_cost(cost) } - #[cfg(feature = 
"full")] + #[cfg(feature = "minimal")] /// Get an element from Merk under a key; path should be resolved and proper /// Merk should be loaded by this moment pub fn get_with_absolute_refs<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( @@ -230,7 +230,7 @@ impl Element { Ok(absolute_element).wrap_with_cost(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get an element's value hash from Merk under a key pub fn get_value_hash<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( merk: &Merk, @@ -259,7 +259,7 @@ impl Element { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_path::SubtreePath; diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index 5b3662df..af74c6f8 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -1,36 +1,36 @@ //! Helpers //! Implements helper functions in Element -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::tree::kv::{ ValueDefinedCostType, ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{ tree::{kv::KV, TreeNode}, TreeFeatureType, TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::reference_path::path_from_reference_path_type; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::reference_path::ReferencePathType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, ElementFlags, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", 
feature = "verify"))] use crate::{Element, Error}; impl Element { - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumItem element type, returns 0 for /// everything else pub fn sum_value_or_default(&self) -> i64 { @@ -40,7 +40,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumItem element type pub fn as_sum_item_value(&self) -> Result { match self { @@ -49,7 +49,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumItem element type pub fn into_sum_item_value(self) -> Result { match self { @@ -58,7 +58,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumTree element type pub fn as_sum_tree_value(&self) -> Result { match self { @@ -67,7 +67,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumTree element type pub fn into_sum_tree_value(self) -> Result { match self { @@ -76,7 +76,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Gives the item value in the Item element type pub fn as_item_bytes(&self) -> Result<&[u8], Error> { match self { @@ -85,7 +85,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Gives the item value in the Item element type pub fn into_item_bytes(self) -> Result, Error> { match self { @@ -94,7 +94,7 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = 
"verify"))] /// Gives the reference path type in the Reference element type pub fn into_reference_path_type(self) -> Result { match self { @@ -103,49 +103,49 @@ impl Element { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a sum tree pub fn is_sum_tree(&self) -> bool { matches!(self, Element::SumTree(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a tree but not a sum tree pub fn is_basic_tree(&self) -> bool { matches!(self, Element::Tree(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a tree pub fn is_any_tree(&self) -> bool { matches!(self, Element::SumTree(..) | Element::Tree(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a reference pub fn is_reference(&self) -> bool { matches!(self, Element::Reference(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is an item pub fn is_any_item(&self) -> bool { matches!(self, Element::Item(..) 
| Element::SumItem(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is an item pub fn is_basic_item(&self) -> bool { matches!(self, Element::Item(..)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a sum item pub fn is_sum_item(&self) -> bool { matches!(self, Element::SumItem(..)) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get the tree feature type pub fn get_feature_type(&self, parent_is_sum_tree: bool) -> Result { match parent_is_sum_tree { @@ -154,7 +154,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Grab the optional flag stored in an element pub fn get_flags(&self) -> &Option { match self { @@ -166,7 +166,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Grab the optional flag stored in an element pub fn get_flags_owned(self) -> Option { match self { @@ -178,7 +178,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Grab the optional flag stored in an element as mutable pub fn get_flags_mut(&mut self) -> &mut Option { match self { @@ -190,7 +190,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Sets the optional flag stored in an element pub fn set_flags(&mut self, new_flags: Option) { match self { @@ -202,7 +202,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get the required item space pub fn required_item_space( len: u32, @@ -216,7 +216,7 @@ impl Element { Ok(len + len.required_space() as u32 + flag_len + flag_len.required_space() as u32 + 1) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Convert the reference to an absolute reference pub(crate) fn convert_if_reference_to_absolute_reference( self, @@ -247,7 +247,7 @@ impl Element { }) } - #[cfg(feature = "full")] + 
#[cfg(feature = "minimal")] /// Get tree costs for a key value pub fn specialized_costs_for_key_value( key: &Vec, @@ -305,7 +305,7 @@ impl Element { Ok(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get tree cost for the element pub fn get_specialized_cost(&self, grove_version: &GroveVersion) -> Result { check_grovedb_v0!( @@ -322,7 +322,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get the value defined cost for a serialized value pub fn value_defined_cost(&self, grove_version: &GroveVersion) -> Option { let Some(value_cost) = self.get_specialized_cost(grove_version).ok() else { @@ -342,7 +342,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get the value defined cost for a serialized value pub fn value_defined_cost_for_serialized_value( value: &[u8], @@ -353,7 +353,7 @@ impl Element { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Decode from bytes pub fn raw_decode(bytes: &[u8], grove_version: &GroveVersion) -> Result { let tree = TreeNode::decode_raw( diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index ce0144a2..5b47acc4 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -15,7 +15,7 @@ use integer_encoding::VarInt; use crate::{Element, Element::SumItem, Error, Hash}; impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Insert an element in Merk under a key; path should be resolved and /// proper Merk should be loaded by this moment /// If transaction is not passed, the batch will be written immediately. @@ -71,7 +71,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Add to batch operations a "Put" op with key and serialized element. /// Return CostResult. 
pub fn insert_into_batch_operations>( @@ -114,7 +114,7 @@ impl Element { Ok(()).wrap_with_cost(Default::default()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Insert an element in Merk under a key if it doesn't yet exist; path /// should be resolved and proper Merk should be loaded by this moment /// If transaction is not passed, the batch will be written immediately. @@ -145,7 +145,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Adds a "Put" op to batch operations with the element and key if it /// doesn't exist yet. Returns CostResult. pub fn insert_if_not_exists_into_batch_operations< @@ -189,7 +189,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Insert an element in Merk under a key if the value is different from /// what already exists; path should be resolved and proper Merk should /// be loaded by this moment If transaction is not passed, the batch @@ -229,7 +229,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Adds a "Put" op to batch operations with the element and key if the /// value is different from what already exists; Returns CostResult. /// The bool represents if we indeed inserted. @@ -279,7 +279,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Insert a reference element in Merk under a key; path should be resolved /// and proper Merk should be loaded by this moment /// If transaction is not passed, the batch will be written immediately. @@ -329,7 +329,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Adds a "Put" op to batch operations with reference and key. Returns /// CostResult. 
pub fn insert_reference_into_batch_operations>( @@ -361,7 +361,7 @@ impl Element { Ok(()).wrap_with_cost(Default::default()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Insert a tree element in Merk under a key; path should be resolved /// and proper Merk should be loaded by this moment /// If transaction is not passed, the batch will be written immediately. @@ -416,7 +416,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Adds a "Put" op to batch operations for a subtree and key pub fn insert_subtree_into_batch_operations>( &self, @@ -465,7 +465,7 @@ impl Element { } } -#[cfg(feature = "full")] +#[cfg(all(feature = "minimal", feature = "test_utils"))] #[cfg(test)] mod tests { use grovedb_merk::test_utils::{empty_path_merk, empty_path_merk_read_only, TempMerk}; diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index 9986c624..c8ecbbc8 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -2,75 +2,75 @@ //! Subtrees handling is isolated so basically this module is about adapting //! Merk API to GroveDB needs. 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod constructor; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod delete; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod exists; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod get; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub(crate) mod helpers; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod insert; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod query; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use query::QueryOptions; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod serialize; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use std::fmt; use bincode::{Decode, Encode}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_merk::estimated_costs::SUM_VALUE_EXTRA_COST; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::estimated_costs::{LAYER_COST_SIZE, SUM_LAYER_COST_SIZE}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_visualize::visualize_to_vec; use crate::operations::proof::util::hex_to_ascii; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::reference_path::ReferencePathType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::OperationCost; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Optional meta-data to be stored per element pub type ElementFlags = Vec; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Optional single byte to represent the maximum number 
of reference hop to /// base element pub type MaxReferenceHop = Option; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// The cost of a tree pub const TREE_COST_SIZE: u32 = LAYER_COST_SIZE; // 3 -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// The cost of a sum item /// /// It is 11 because we have 9 bytes for the sum value /// 1 byte for the item type /// 1 byte for the flags option pub const SUM_ITEM_COST_SIZE: u32 = SUM_VALUE_EXTRA_COST + 2; // 11 -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// The cost of a sum tree pub const SUM_TREE_COST_SIZE: u32 = SUM_LAYER_COST_SIZE; // 12 -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// int 64 sum value pub type SumValue = i64; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Variants of GroveDB stored entities /// /// ONLY APPEND TO THIS LIST!!! Because /// of how serialization works. 
#[derive(Clone, Encode, Decode, PartialEq, Eq, Hash)] -#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] +#[cfg_attr(not(any(feature = "minimal", feature = "visualize")), derive(Debug))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Element { /// An ordinary value @@ -157,7 +157,7 @@ impl Element { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub(crate) fn value_hash( &self, grove_version: &grovedb_version::version::GroveVersion, @@ -167,7 +167,7 @@ impl Element { } } -#[cfg(any(feature = "full", feature = "visualize"))] +#[cfg(any(feature = "minimal", feature = "visualize"))] impl fmt::Debug for Element { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 7226db55..f1975aad 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -3,31 +3,31 @@ use std::fmt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostContext, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::proofs::query::query_item::QueryItem; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::proofs::query::SubqueryBranch; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::proofs::Query; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::{ check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; -#[cfg(any(feature = 
"full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::Element; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ element::helpers::raw_decode, query_result_type::{ @@ -40,10 +40,10 @@ use crate::{ util::{merk_optional_tx, merk_optional_tx_internal_error, storage_context_optional_tx}, Error, PathQuery, TransactionArg, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{query_result_type::Path, SizedQuery}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Copy, Clone, Debug)] pub struct QueryOptions { pub allow_get_raw: bool, @@ -58,7 +58,7 @@ pub struct QueryOptions { pub error_if_intermediate_path_tree_not_present: bool, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for QueryOptions { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "QueryOptions {{")?; @@ -78,7 +78,7 @@ impl fmt::Display for QueryOptions { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Default for QueryOptions { fn default() -> Self { QueryOptions { @@ -90,7 +90,7 @@ impl Default for QueryOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Path query push arguments pub struct PathQueryPushArgs<'db, 'ctx, 'a> where @@ -111,7 +111,7 @@ where pub offset: &'a mut Option, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn format_query(query: &Query, indent: usize) -> String { let indent_str = " ".repeat(indent); let mut output = format!("{}Query {{\n", indent_str); @@ -147,7 +147,7 @@ fn format_query(query: &Query, indent: usize) -> String { output } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { let indent_str = " ".repeat(indent); let mut output = "SubqueryBranch {{\n".to_string(); 
@@ -169,7 +169,7 @@ fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { output } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'db, 'ctx, 'a> fmt::Display for PathQueryPushArgs<'db, 'ctx, 'a> where 'db: 'ctx, @@ -230,7 +230,7 @@ where } impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Returns a vector of result elements based on given query pub fn get_query( storage: &RocksDbStorage, @@ -259,7 +259,7 @@ impl Element { .map_ok(|(elements, _)| elements) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Get values of result elements coming from given query pub fn get_query_values( storage: &RocksDbStorage, @@ -297,7 +297,7 @@ impl Element { }) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Returns a vector of result elements and the number of skipped items /// based on given query pub fn get_query_apply_function( @@ -382,7 +382,7 @@ impl Element { Ok((QueryResultElements::from_elements(results), skipped)).wrap_with_cost(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Returns a vector of elements excluding trees, and the number of skipped /// elements pub fn get_path_query( @@ -415,7 +415,7 @@ impl Element { ) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Returns a vector of elements, and the number of skipped elements pub fn get_sized_query( storage: &RocksDbStorage, @@ -443,7 +443,7 @@ impl Element { ) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Push arguments to path query fn path_query_push( args: PathQueryPushArgs, @@ -684,7 +684,7 @@ impl Element { Ok(()).wrap_with_cost(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Takes a sized query and a key and returns subquery key and subquery as /// tuple fn subquery_paths_and_value_for_sized_query( @@ -725,7 +725,7 @@ impl Element { /// trees where the sub elements have no matches, hence the limit would /// not decrease and hence we would continue on 
the increasingly /// expensive query. - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] // TODO: refactor fn query_item( storage: &RocksDbStorage, @@ -894,7 +894,7 @@ impl Element { .wrap_with_cost(cost) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] fn basic_push(args: PathQueryPushArgs, grove_version: &GroveVersion) -> Result<(), Error> { check_grovedb_v0!( "basic_push", @@ -950,7 +950,7 @@ impl Element { Ok(()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Iterator pub fn iterator(mut raw_iter: I) -> CostContext> { let mut cost = OperationCost::default(); @@ -959,7 +959,7 @@ impl Element { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; @@ -1687,12 +1687,12 @@ mod tests { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub struct ElementsIterator { raw_iter: I, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl ElementsIterator { pub fn new(raw_iter: I) -> Self { ElementsIterator { raw_iter } diff --git a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index 395fea8d..d0974cc9 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -4,11 +4,11 @@ use bincode::config; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::{Element, Error}; impl Element { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Serializes self. Returns vector of u8s. pub fn serialize(&self, grove_version: &GroveVersion) -> Result, Error> { check_grovedb_v0!( @@ -20,7 +20,7 @@ impl Element { .map_err(|e| Error::CorruptedData(format!("unable to serialize element {}", e))) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Serializes self. Returns usize. 
pub fn serialized_size(&self, grove_version: &GroveVersion) -> Result { check_grovedb_v0!( @@ -31,7 +31,7 @@ impl Element { .map(|serialized| serialized.len()) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Deserializes given bytes and sets as self pub fn deserialize(bytes: &[u8], grove_version: &GroveVersion) -> Result { check_grovedb_v0!( @@ -45,7 +45,7 @@ impl Element { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use integer_encoding::VarInt; diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index 92343935..4618e6ac 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -3,7 +3,7 @@ use std::convert::Infallible; /// GroveDB Errors -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, thiserror::Error)] pub enum Error { #[error("infallible")] @@ -80,7 +80,7 @@ pub enum Error { /// Invalid parameter InvalidParameter(&'static str), - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] // Irrecoverable errors #[error("storage_cost error: {0}")] /// Storage error diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index f3b2dcc4..8e0b088a 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -125,45 +125,45 @@ //! [Architectural Decision Records](https://github.com/dashpay/grovedb/tree/master/adr) or //! 
[Tutorial](https://www.grovedb.org/tutorials.html) -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod batch; #[cfg(feature = "grovedbg")] pub mod debugger; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod element; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod error; #[cfg(feature = "estimated_costs")] mod estimated_costs; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod operations; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod query; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod query_result_type; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod reference_path; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod replication; -#[cfg(all(test, feature = "full"))] +#[cfg(all(test, feature = "minimal"))] mod tests; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod util; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod visualize; #[cfg(feature = "grovedbg")] use std::sync::Arc; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{collections::HashMap, option::Option::None, path::Path}; #[cfg(feature = "grovedbg")] use debugger::start_visualizer; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use element::Element; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use element::ElementFlags; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; @@ -174,77 +174,77 @@ pub use grovedb_merk::estimated_costs::{ }, worst_case_costs::WorstCaseLayerInformation, }; 
-#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use grovedb_merk::proofs::query::query_item::QueryItem; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use grovedb_merk::proofs::Query; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{ self, tree::{combine_hash, value_hash}, BatchEntry, CryptoHash, KVIterator, Merk, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::PrefixedRocksDbImmediateStorageContext; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, StorageBatch, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::{Storage, StorageContext}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_visualize::DebugByteVectors; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use query::{PathQuery, SizedQuery}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use reference_path::path_from_reference_path_type; #[cfg(feature = "grovedbg")] use tokio::net::ToSocketAddrs; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::element::helpers::raw_decode; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use crate::error::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use 
crate::operations::proof::util::hex_to_ascii; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::Error::MerkError; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] type Hash = [u8; 32]; /// GroveDb pub struct GroveDb { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] db: RocksDbStorage, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; /// Transaction -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub type Transaction<'db> = >::Transaction; /// TransactionArg -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub type TransactionArg<'db, 'a> = Option<&'a Transaction<'db>>; /// Type alias for the return type of the `verify_merk_and_submerks` and /// `verify_grovedb` functions. It represents a mapping of paths (as vectors of /// vectors of bytes) to a tuple of three cryptographic hashes: the root hash, /// the combined value hash, and the expected value hash. -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] type VerificationIssues = HashMap>, (CryptoHash, CryptoHash, CryptoHash)>; /// Type alias for the return type of the `open_merk_for_replication` function. @@ -252,14 +252,14 @@ type VerificationIssues = HashMap>, (CryptoHash, CryptoHash, CryptoH /// - A `Merk` instance with a prefixed RocksDB immediate storage context. /// - An optional `root_key`, represented as a vector of bytes. /// - A boolean indicating whether the Merk is a sum tree. 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] type OpenedMerkForReplication<'tx> = ( Merk>, Option>, bool, ); -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Opens a given path pub fn open>(path: P) -> Result { diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 516796ed..493e6e74 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -28,22 +28,22 @@ //! Auxiliary operations -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost, }; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; use grovedb_storage::{Storage, StorageBatch}; use grovedb_version::version::GroveVersion; use crate::util::storage_context_optional_tx; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{util::meta_storage_context_optional_tx, Element, Error, GroveDb, TransactionArg}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Put op for aux storage pub fn put_aux>( diff --git a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index 986f2b90..b828ce0c 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -32,7 +32,7 @@ impl GroveDb { key: &KeyInfo, stop_path_height: Option, validate: bool, - estimated_layer_info: IntMap, + estimated_layer_info: IntMap, grove_version: &GroveVersion, ) -> CostResult, Error> { check_grovedb_v0_with_cost!( @@ -69,7 +69,7 @@ impl GroveDb { ) = cost_return_on_error_no_add!( &cost, if height == path_len - 1 { - if let Some(layer_info) = estimated_layer_info.get(height as u64) { + if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = 
cost_return_on_error_no_add!( &cost, layer_info @@ -94,7 +94,7 @@ impl GroveDb { } else { let (last_key, smaller_path) = used_path.split_last().unwrap(); used_path = smaller_path; - if let Some(layer_info) = estimated_layer_info.get(height as u64) { + if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = cost_return_on_error_no_add!( &cost, layer_info diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 7ecfce83..2255f29d 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -15,7 +15,7 @@ use crate::{ TransactionArg, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone)] /// Delete up tree options pub struct DeleteUpTreeOptions { @@ -31,7 +31,7 @@ pub struct DeleteUpTreeOptions { pub stop_path_height: Option, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for DeleteUpTreeOptions { fn default() -> Self { DeleteUpTreeOptions { @@ -44,7 +44,7 @@ impl Default for DeleteUpTreeOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl DeleteUpTreeOptions { fn to_delete_options(&self) -> DeleteOptions { DeleteOptions { @@ -56,7 +56,7 @@ impl DeleteUpTreeOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Delete up tree while empty will delete nodes while they are empty up a /// tree. 
diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 9244c60b..9e24ad3e 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -2,27 +2,27 @@ #[cfg(feature = "estimated_costs")] mod average_case; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod delete_up_tree; #[cfg(feature = "estimated_costs")] mod worst_case; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::collections::{BTreeSet, HashMap}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use delete_up_tree::DeleteUpTreeOptions; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, storage_cost::removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, CostResult, CostsExt, OperationCost, }; use grovedb_merk::{proofs::Query, KVIterator}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::{ rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, Storage, StorageBatch, StorageContext, @@ -31,7 +31,7 @@ use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{GroveOp, QualifiedGroveDbOp}, util::storage_context_with_parent_optional_tx, @@ -39,7 +39,7 @@ use crate::{ }; use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone)] /// Clear options pub struct ClearOptions { @@ -52,7 +52,7 @@ pub struct ClearOptions { pub trying_to_clear_with_subtrees_returns_error: bool, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for ClearOptions { fn default() -> Self { ClearOptions { @@ -63,7 +63,7 @@ impl Default for ClearOptions { } } 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone)] /// Delete options pub struct DeleteOptions { @@ -77,7 +77,7 @@ pub struct DeleteOptions { pub validate_tree_at_path_exists: bool, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for DeleteOptions { fn default() -> Self { DeleteOptions { @@ -89,7 +89,7 @@ impl Default for DeleteOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl DeleteOptions { fn as_merk_options(&self) -> MerkOptions { MerkOptions { @@ -98,7 +98,7 @@ impl DeleteOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Delete an element at a specified subtree path and key. pub fn delete<'b, B, P>( @@ -994,7 +994,7 @@ impl GroveDb { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_costs::{ diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index 8533cde5..effcb5fe 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -18,7 +18,7 @@ use crate::{ Error, GroveDb, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Worst case costs for delete operations for delete up tree while empty pub fn worst_case_delete_operations_for_delete_up_tree_while_empty<'db, S: Storage<'db>>( @@ -26,7 +26,7 @@ impl GroveDb { key: &KeyInfo, stop_path_height: Option, validate: bool, - intermediate_tree_info: IntMap<(bool, u32)>, + intermediate_tree_info: IntMap, max_element_size: u32, grove_version: &GroveVersion, ) -> CostResult, Error> { diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index aca4426d..0cb44462 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -1,19 +1,19 @@ //! 
Average case get costs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::OperationCost; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; use crate::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, GroveDb, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Get the Operation Cost for a has query that doesn't follow /// references with the following parameters diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index b6289699..4b1d0ccd 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -2,38 +2,38 @@ #[cfg(feature = "estimated_costs")] mod average_case; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod query; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use query::QueryItemOrSumReturnType; #[cfg(feature = "estimated_costs")] mod worst_case; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::collections::HashSet; use grovedb_costs::cost_return_on_error_no_add; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ reference_path::{path_from_reference_path_type, path_from_reference_qualified_path_type}, util::storage_context_optional_tx, Element, Error, GroveDb, Transaction, TransactionArg, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Limit of possible indirections pub const MAX_REFERENCE_HOPS: usize = 10; 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Get an element from the backing store /// Merk Caching is on by default diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 81046dbf..0e97f7fb 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -1,30 +1,30 @@ //! Query operations use grovedb_costs::cost_return_on_error_default; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; use grovedb_version::{ check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::element::SumValue; use crate::{ element::QueryOptions, operations::proof::ProveOptions, query_result_type::PathKeyOptionalElementTrio, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ query_result_type::{QueryResultElement, QueryResultElements, QueryResultType}, reference_path::ReferencePathType, Element, Error, GroveDb, PathQuery, TransactionArg, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Debug, Eq, PartialEq, Clone)] /// A return type for query_item_value_or_sum pub enum QueryItemOrSumReturnType { @@ -34,7 +34,7 @@ pub enum QueryItemOrSumReturnType { SumValue(SumValue), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Encoded query for multiple path queries pub fn query_encoded_many( @@ -688,7 +688,7 @@ where { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use std::collections::HashMap; diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index 7554a911..591d6150 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ 
b/grovedb/src/operations/get/worst_case.rs @@ -1,19 +1,19 @@ //! Worst case get costs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::OperationCost; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; use crate::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, GroveDb, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Worst case cost for has raw pub fn worst_case_for_has_raw( diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 5926fedd..57a59380 100644 --- a/grovedb/src/operations/insert/mod.rs +++ b/grovedb/src/operations/insert/mod.rs @@ -1,16 +1,16 @@ //! Insert operations -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{collections::HashMap, option::Option::None}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_merk::{tree::NULL_HASH, Merk, MerkOptions}; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::{ PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, }; @@ -19,13 +19,13 @@ use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ reference_path::path_from_reference_path_type, Element, Error, GroveDb, Transaction, TransactionArg, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone)] /// Insert options pub struct InsertOptions { @@ -37,7 +37,7 @@ pub struct InsertOptions { pub base_root_storage_is_free: bool, } -#[cfg(feature = "full")] 
+#[cfg(feature = "minimal")] impl Default for InsertOptions { fn default() -> Self { InsertOptions { @@ -48,7 +48,7 @@ impl Default for InsertOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl InsertOptions { fn checks_for_override(&self) -> bool { self.validate_insertion_does_not_override_tree || self.validate_insertion_does_not_override @@ -61,7 +61,7 @@ impl InsertOptions { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Insert a GroveDB element given a path to the subtree and the key to /// insert at @@ -640,7 +640,7 @@ impl GroveDb { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_costs::{ diff --git a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index 07c34999..a007d219 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -1,16 +1,16 @@ //! Check if empty tree operations -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::error::GroveVersionError; use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{util::merk_optional_tx, Element, Error, GroveDb, TransactionArg}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { /// Check if it's an empty tree pub fn is_empty_tree<'b, B, P>( diff --git a/grovedb/src/operations/mod.rs b/grovedb/src/operations/mod.rs index ba9b8599..08f0918d 100644 --- a/grovedb/src/operations/mod.rs +++ b/grovedb/src/operations/mod.rs @@ -1,18 +1,18 @@ //! 
Operations for the manipulation of GroveDB state -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub(crate) mod auxiliary; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod delete; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub(crate) mod get; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod insert; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub(crate) mod is_empty_tree; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod proof; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use get::{QueryItemOrSumReturnType, MAX_REFERENCE_HOPS}; diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index 6e814f67..d8bfc209 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -173,11 +173,10 @@ impl GroveDb { ) ); - Ok(GroveDBProofV0 { + Ok(GroveDBProof::V0(GroveDBProofV0 { root_layer, prove_options, - } - .into()) + })) .wrap_with_cost(cost) } diff --git a/grovedb/src/operations/proof/mod.rs b/grovedb/src/operations/proof/mod.rs index c90e176b..cadc6032 100644 --- a/grovedb/src/operations/proof/mod.rs +++ b/grovedb/src/operations/proof/mod.rs @@ -1,6 +1,6 @@ //! 
Proof operations -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod generate; pub mod util; mod verify; @@ -8,7 +8,6 @@ mod verify; use std::{collections::BTreeMap, fmt}; use bincode::{Decode, Encode}; -use derive_more::From; use grovedb_merk::{ proofs::{ query::{Key, VerifyOptions}, @@ -64,7 +63,7 @@ pub struct LayerProof { pub lower_layers: BTreeMap, } -#[derive(Encode, Decode, From)] +#[derive(Encode, Decode)] pub enum GroveDBProof { V0(GroveDBProofV0), } diff --git a/grovedb/src/operations/proof/util.rs b/grovedb/src/operations/proof/util.rs index a33954d1..1f710490 100644 --- a/grovedb/src/operations/proof/util.rs +++ b/grovedb/src/operations/proof/util.rs @@ -8,20 +8,20 @@ use grovedb_version::version::GroveVersion; use crate::Element; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub type ProvedKeyValues = Vec; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub type ProvedKeyOptionalValues = Vec; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub type ProvedPathKeyValues = Vec; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub type ProvedPathKeyOptionalValues = Vec; /// Proved path-key-value -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, PartialEq, Eq)] pub struct ProvedPathKeyOptionalValue { /// Path @@ -34,7 +34,7 @@ pub struct ProvedPathKeyOptionalValue { pub proof: CryptoHash, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for ProvedPathKeyOptionalValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "ProvedPathKeyValue {{")?; @@ -59,7 +59,7 @@ impl fmt::Display for ProvedPathKeyOptionalValue { } /// Proved path-key-value -#[cfg(any(feature = 
"full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, PartialEq, Eq)] pub struct ProvedPathKeyValue { /// Path @@ -72,7 +72,7 @@ pub struct ProvedPathKeyValue { pub proof: CryptoHash, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for ProvedPathKeyValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "ProvedPathKeyValue {{")?; diff --git a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 07930c4e..890da1db 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -7,21 +7,21 @@ use std::{ }; use bincode::{Decode, Encode}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_merk::proofs::query::query_item::QueryItem; use grovedb_merk::proofs::query::{Key, SubqueryBranch}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_merk::proofs::Query; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; use indexmap::IndexMap; use crate::operations::proof::util::hex_to_ascii; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::query_result_type::PathKey; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::Error; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, Clone, PartialEq, Encode, Decode)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] /// Path query @@ -35,7 +35,7 @@ pub struct PathQuery { pub query: SizedQuery, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for PathQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 
write!(f, "PathQuery {{ path: [")?; @@ -49,7 +49,7 @@ impl fmt::Display for PathQuery { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, Clone, PartialEq, Encode, Decode)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] /// Holds a query to apply to a tree and an optional limit/offset value. @@ -63,7 +63,7 @@ pub struct SizedQuery { pub offset: Option, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for SizedQuery { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "SizedQuery {{ query: {}", self.query)?; @@ -77,7 +77,7 @@ impl fmt::Display for SizedQuery { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl SizedQuery { /// New sized query pub const fn new(query: Query, limit: Option, offset: Option) -> Self { @@ -107,7 +107,7 @@ impl SizedQuery { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl PathQuery { /// New path query pub const fn new(path: Vec>, query: SizedQuery) -> Self { @@ -455,7 +455,7 @@ impl PathQuery { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, Clone, PartialEq)] pub enum HasSubquery<'a> { NoSubquery, @@ -463,7 +463,7 @@ pub enum HasSubquery<'a> { Conditionally(Cow<'a, IndexMap>), } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'a> fmt::Display for HasSubquery<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -495,7 +495,7 @@ impl<'a> HasSubquery<'a> { /// This represents a query where the items might be borrowed, it is used to get /// subquery information -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = 
"verify"))] #[derive(Debug, Clone, PartialEq)] pub struct SinglePathSubquery<'a> { /// Items @@ -508,7 +508,7 @@ pub struct SinglePathSubquery<'a> { pub in_path: Option>, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'a> fmt::Display for SinglePathSubquery<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "InternalCowItemsQuery {{")?; @@ -582,7 +582,7 @@ impl<'a> SinglePathSubquery<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use std::{borrow::Cow, ops::RangeFull}; diff --git a/grovedb/src/query_result_type.rs b/grovedb/src/query_result_type.rs index 1850544e..a8d329f7 100644 --- a/grovedb/src/query_result_type.rs +++ b/grovedb/src/query_result_type.rs @@ -462,7 +462,7 @@ impl fmt::Display for QueryResultElement { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl QueryResultElement { /// Map element pub fn map_element( @@ -487,23 +487,23 @@ impl QueryResultElement { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for key-element common pattern. pub type KeyElementPair = (Key, Element); -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for key optional_element common pattern. pub type KeyOptionalElementPair = (Key, Option); -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for path-key-element common pattern. pub type PathKeyElementTrio = (Path, Key, Element); -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for path - key - optional_element common pattern. 
pub type PathKeyOptionalElementTrio = (Path, Key, Option); -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl TryFromVersioned for PathKeyOptionalElementTrio { type Error = Error; @@ -520,7 +520,7 @@ impl TryFromVersioned for PathKeyOptionalElementTrio { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl TryFromVersioned for PathKeyOptionalElementTrio { type Error = Error; @@ -540,7 +540,7 @@ impl TryFromVersioned for PathKeyOptionalElementTrio } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_version::{version::GroveVersion, TryIntoVersioned}; diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index fcfeee6e..afd85260 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -1,19 +1,19 @@ //! Space efficient methods for referencing other elements in GroveDB -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use std::fmt; use bincode::{Decode, Encode}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_visualize::visualize_to_vec; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::Error; -#[cfg(any(feature = "full", feature = "verify"))] -#[cfg_attr(not(any(feature = "full", feature = "visualize")), derive(Debug))] +#[cfg(any(feature = "minimal", feature = "verify"))] +#[cfg_attr(not(any(feature = "minimal", feature = "visualize")), derive(Debug))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] /// Reference path variants #[derive(Hash, Eq, PartialEq, Encode, Decode, Clone)] @@ -110,7 +110,7 @@ impl fmt::Display for ReferencePathType { } } -#[cfg(any(feature = "full", feature = "verify"))] 
+#[cfg(any(feature = "minimal", feature = "verify"))] impl ReferencePathType { /// Given the reference path type and the current qualified path (path+key), /// this computes the absolute path of the item the reference is pointing @@ -134,7 +134,7 @@ impl ReferencePathType { } } -#[cfg(any(feature = "full", feature = "visualize"))] +#[cfg(any(feature = "minimal", feature = "visualize"))] impl fmt::Debug for ReferencePathType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut v = Vec::new(); @@ -144,7 +144,7 @@ impl fmt::Debug for ReferencePathType { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Given the reference path type and the current qualified path (path+key), /// this computes the absolute path of the item the reference is pointing to. pub fn path_from_reference_qualified_path_type>( @@ -161,7 +161,7 @@ pub fn path_from_reference_qualified_path_type>( } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Given the reference path type, the current path and the terminal key, this /// computes the absolute path of the item the reference is pointing to. 
pub fn path_from_reference_path_type>( @@ -285,7 +285,7 @@ pub fn path_from_reference_path_type>( } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl ReferencePathType { /// Serialized size pub fn serialized_size(&self) -> usize { @@ -320,7 +320,7 @@ impl ReferencePathType { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 1cd50519..0996c324 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -20,7 +20,7 @@ pub type ChunkIdentifier = (crate::SubtreePrefix, Option>, bool, Vec pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl GroveDb { pub fn start_syncing_session(&self, app_hash: [u8; 32]) -> Pin> { MultiStateSyncSession::new(self.start_transaction(), app_hash) diff --git a/merk/Cargo.toml b/merk/Cargo.toml index cdb47bdf..40510f6b 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -18,56 +18,45 @@ grovedb-version = { version = "2.1.0", path = "../grovedb-version" } grovedb-visualize = { version = "2.1.0", path = "../visualize" } bincode = { version = "2.0.0-rc.3" } -failure = "0.1.8" hex = "0.4.3" indexmap = "2.2.6" integer-encoding = "4.0.0" -thiserror = "1.0.58" +thiserror = "2.0.11" serde = { version = "1.0.210", features = ["derive"], optional = true } - -[dependencies.time] -version = "0.3.34" -optional = true +rand = { version = "0.8.5", features = ["small_rng"], optional = true } [dependencies.colored] -version = "2.1.0" +version = "3.0.0" optional = true [dependencies.num_cpus] version = "1.16.0" optional = true -[dependencies.byteorder] -version = "1.5.0" -optional = true - [dependencies.ed] version = "0.2.2" optional = true [dependencies.blake3] -version = "1.5.1" -optional = true - -[dependencies.rand] -version = "0.8.5" -features = ["small_rng"] +version = "1.5.5" optional = true [features] default = ["full"] 
proof_debug = [] serde = ["dep:serde", "indexmap/serde"] -full = ["rand", - "time", - "colored", - "num_cpus", - "byteorder", +minimal = ["num_cpus", "ed", "blake3", "grovedb-storage", "grovedb-storage/rocksdb_storage" ] +full = ["minimal", + "test_utils", + "colored_debug", +] +test_utils = ["rand"] +colored_debug = ["colored"] verify = [ "ed", "blake3" diff --git a/merk/src/error.rs b/merk/src/error.rs index c365b898..8fdc1cfc 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -1,8 +1,8 @@ //! Errors -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::proofs::chunk::error::ChunkError; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, thiserror::Error)] /// Errors pub enum Error { @@ -36,7 +36,7 @@ pub enum Error { CorruptedState(&'static str), /// Chunking error - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] #[error("chunking error {0}")] ChunkingError(ChunkError), @@ -46,7 +46,7 @@ pub enum Error { OldChunkingError(&'static str), /// Chunk restoring error - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] #[error("chunk restoring error {0}")] ChunkRestoringError(ChunkError), @@ -99,7 +99,7 @@ pub enum Error { #[error("client corruption error {0}")] ClientCorruptionError(String), - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Storage error #[error("storage error {0}")] StorageError(grovedb_storage::Error), diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index fc08ab45..6e877efe 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -1,11 +1,11 @@ //! 
Average case costs for Merk -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ error::Error, estimated_costs::LAYER_COST_SIZE, @@ -13,20 +13,20 @@ use crate::{ HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, HASH_LENGTH_U32, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Average key size pub type AverageKeySize = u8; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Average value size pub type AverageValueSize = u32; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Average flags size pub type AverageFlagsSize = u32; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Weight pub type Weight = u8; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated number of sum trees #[derive(Default)] @@ -45,8 +45,8 @@ pub enum EstimatedSumTrees { AllSumTrees, } -#[cfg(feature = "full")] -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] +#[cfg(feature = "minimal")] impl EstimatedSumTrees { fn estimated_size(&self) -> Result { match self { @@ -62,7 +62,7 @@ impl EstimatedSumTrees { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated layer sizes pub enum EstimatedLayerSizes { @@ -98,7 +98,7 @@ pub enum EstimatedLayerSizes { }, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl EstimatedLayerSizes { /// Return average flags size for layer pub fn layered_flags_size(&self) -> Result<&Option, Error> { @@ -220,17 +220,17 @@ impl EstimatedLayerSizes { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Approximate element count pub type ApproximateElementCount = u32; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Estimated level number pub type EstimatedLevelNumber = u32; 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Estimated to be empty pub type EstimatedToBeEmpty = bool; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Information on an estimated layer pub struct EstimatedLayerInformation { @@ -242,10 +242,10 @@ pub struct EstimatedLayerInformation { pub estimated_layer_sizes: EstimatedLayerSizes, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl EstimatedLayerInformation {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Estimated elements and level number of a layer pub enum EstimatedLayerCount { @@ -257,7 +257,7 @@ pub enum EstimatedLayerCount { EstimatedLevel(EstimatedLevelNumber, EstimatedToBeEmpty), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl EstimatedLayerCount { /// Returns true if the tree is estimated to be empty. pub fn estimated_to_be_empty(&self) -> bool { @@ -285,7 +285,7 @@ impl EstimatedLayerCount { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNode { /// Return estimate of average encoded tree size pub fn average_case_encoded_tree_size( @@ -301,7 +301,7 @@ impl TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk node pub fn add_average_case_get_merk_node( cost: &mut OperationCost, @@ -323,7 +323,7 @@ pub fn add_average_case_get_merk_node( Ok(()) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk tree pub fn add_average_case_merk_has_value( cost: &mut OperationCost, @@ -334,7 +334,7 @@ pub fn add_average_case_merk_has_value( cost.storage_loaded_bytes += (not_prefixed_key_len + estimated_element_size) as u64; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for insertion into merk pub fn add_average_case_merk_replace_layered( cost: &mut OperationCost, @@ -357,7 +357,7 @@ pub fn add_average_case_merk_replace_layered( 
cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add average case for deletion from merk pub fn add_average_case_merk_delete_layered( cost: &mut OperationCost, @@ -369,7 +369,7 @@ pub fn add_average_case_merk_delete_layered( cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add average case for deletion from merk pub fn add_average_case_merk_delete(cost: &mut OperationCost, _key_len: u32, value_len: u32) { // todo: verify this @@ -377,7 +377,7 @@ pub fn add_average_case_merk_delete(cost: &mut OperationCost, _key_len: u32, val cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] const fn node_hash_update_count() -> u32 { // It's a hash of node hash, left and right let bytes = HASH_LENGTH * 3; @@ -386,20 +386,20 @@ const fn node_hash_update_count() -> u32 { 1 + ((bytes - 1) / HASH_BLOCK_SIZE) as u32 } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk tree root hash pub fn add_average_case_merk_root_hash(cost: &mut OperationCost) { cost.hash_node_calls += node_hash_update_count(); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Average case cost of propagating a merk pub fn average_case_merk_propagate(input: &EstimatedLayerInformation) -> CostResult<(), Error> { let mut cost = OperationCost::default(); add_average_case_merk_propagate(&mut cost, input).wrap_with_cost(cost) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add average case cost for propagating a merk pub fn add_average_case_merk_propagate( cost: &mut OperationCost, diff --git a/merk/src/estimated_costs/mod.rs b/merk/src/estimated_costs/mod.rs index bd669db1..7648246b 100644 --- a/merk/src/estimated_costs/mod.rs +++ b/merk/src/estimated_costs/mod.rs @@ -1,20 +1,20 @@ //! 
Estimated costs for Merk -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::OperationCost; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{tree::kv::KV, HASH_BLOCK_SIZE_U32, HASH_LENGTH_U32}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod average_case_costs; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod worst_case_costs; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// The cost of a subtree layer /// It is 3 because we have: /// 1 byte for the element type @@ -22,16 +22,16 @@ pub mod worst_case_costs; /// 1 byte for the flag option pub const LAYER_COST_SIZE: u32 = 3; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// The cost of a sum value pub const SUM_VALUE_EXTRA_COST: u32 = 9; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// The cost of a summed subtree layer /// This is the layer size + 9 for the encoded value pub const SUM_LAYER_COST_SIZE: u32 = LAYER_COST_SIZE + SUM_VALUE_EXTRA_COST; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl KV { fn encoded_kv_node_size(element_size: u32, is_sum_node: bool) -> u32 { // We always charge 8 bytes for the sum node (even though @@ -45,7 +45,7 @@ impl KV { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for insertion into merk pub fn add_cost_case_merk_insert( cost: &mut OperationCost, @@ -69,7 +69,7 @@ pub fn add_cost_case_merk_insert( cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for insertion into merk pub fn add_cost_case_merk_insert_layered( cost: &mut OperationCost, @@ -95,7 +95,7 @@ pub fn add_cost_case_merk_insert_layered( cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for insertion into merk pub fn add_cost_case_merk_replace( cost: 
&mut OperationCost, @@ -117,7 +117,7 @@ pub fn add_cost_case_merk_replace( cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for replacement in merk when the value size is known to not /// change pub fn add_cost_case_merk_replace_same_size( @@ -142,7 +142,7 @@ pub fn add_cost_case_merk_replace_same_size( cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for insertion into merk pub fn add_cost_case_merk_replace_layered( cost: &mut OperationCost, @@ -168,7 +168,7 @@ pub fn add_cost_case_merk_replace_layered( cost.hash_node_calls += 2; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add cost case for replacement in merk when the value size is known to not /// change pub fn add_cost_case_merk_patch( diff --git a/merk/src/estimated_costs/worst_case_costs.rs b/merk/src/estimated_costs/worst_case_costs.rs index 9ae6c2b3..3cbd6399 100644 --- a/merk/src/estimated_costs/worst_case_costs.rs +++ b/merk/src/estimated_costs/worst_case_costs.rs @@ -30,10 +30,10 @@ use std::cmp::Ordering; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ error::Error, merk::defaults::MAX_PREFIXED_KEY_SIZE, @@ -41,7 +41,7 @@ use crate::{ HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, PartialEq, Eq, Debug)] /// Worst case layer info pub enum WorstCaseLayerInformation { @@ -51,7 +51,7 @@ pub enum WorstCaseLayerInformation { NumberOfLevels(u32), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNode { /// Return worst case size of encoded tree pub fn worst_case_encoded_tree_size( @@ -67,7 +67,7 @@ impl TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk node pub fn add_worst_case_get_merk_node( cost: &mut 
OperationCost, @@ -87,7 +87,7 @@ pub fn add_worst_case_get_merk_node( Ok(()) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk tree pub fn add_worst_case_merk_has_value( cost: &mut OperationCost, @@ -98,7 +98,7 @@ pub fn add_worst_case_merk_has_value( cost.storage_loaded_bytes += not_prefixed_key_len as u64 + max_element_size as u64; } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for insertion into merk pub fn add_worst_case_merk_insert( cost: &mut OperationCost, @@ -113,7 +113,7 @@ pub fn add_worst_case_merk_insert( cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case for insertion into merk pub fn add_worst_case_merk_replace_layered( cost: &mut OperationCost, @@ -128,7 +128,7 @@ pub fn add_worst_case_merk_replace_layered( // 37 + 35 + key_len } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add average case for deletion from merk pub fn add_worst_case_merk_delete_layered(cost: &mut OperationCost, _key_len: u32, value_len: u32) { // todo: verify this @@ -136,7 +136,7 @@ pub fn add_worst_case_merk_delete_layered(cost: &mut OperationCost, _key_len: u3 cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add average case for deletion from merk pub fn add_worst_case_merk_delete(cost: &mut OperationCost, _key_len: u32, value_len: u32) { // todo: verify this @@ -144,7 +144,7 @@ pub fn add_worst_case_merk_delete(cost: &mut OperationCost, _key_len: u32, value cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] const fn node_hash_update_count() -> u32 { // It's a hash of node hash, left and right let bytes = HASH_LENGTH * 3; @@ -153,27 +153,27 @@ const fn node_hash_update_count() -> u32 { 1 + ((bytes - 1) / HASH_BLOCK_SIZE) as u32 } -#[cfg(feature = 
"full")] +#[cfg(feature = "minimal")] /// Add worst case for getting a merk tree root hash pub fn add_worst_case_merk_root_hash(cost: &mut OperationCost) { cost.hash_node_calls += node_hash_update_count(); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Merk biggest value size pub const MERK_BIGGEST_VALUE_SIZE: u32 = u16::MAX as u32; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Merk biggest key size pub const MERK_BIGGEST_KEY_SIZE: u32 = 256; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Worst case cost of a merk propagation pub fn worst_case_merk_propagate(input: &WorstCaseLayerInformation) -> CostResult<(), Error> { let mut cost = OperationCost::default(); add_worst_case_merk_propagate(&mut cost, input).wrap_with_cost(cost) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case cost of a merk propagation pub fn add_worst_case_merk_propagate( cost: &mut OperationCost, @@ -216,7 +216,7 @@ pub fn add_worst_case_merk_propagate( Ok(()) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Add worst case cost for is_empty_tree_except pub fn add_worst_case_cost_for_is_empty_tree_except( cost: &mut OperationCost, @@ -227,7 +227,7 @@ pub fn add_worst_case_cost_for_is_empty_tree_except( } /// Add average case cost for is_empty_tree_except -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub fn add_average_case_cost_for_is_empty_tree_except( cost: &mut OperationCost, except_keys_count: u16, diff --git a/merk/src/lib.rs b/merk/src/lib.rs index d746a885..0291314b 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -31,60 +31,60 @@ // #![deny(missing_docs)] /// The top-level store API. 
-#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod merk; #[cfg(feature = "grovedbg")] pub mod debugger; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use crate::merk::{chunks::ChunkProducer, options::MerkOptions, restore::Restorer}; /// Provides a container type that allows temporarily taking ownership of a /// value. // TODO: move this into its own crate -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod owner; /// Algorithms for generating and verifying Merkle proofs. -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod proofs; /// Various helpers useful for tests or benchmarks. -#[cfg(feature = "full")] +#[cfg(feature = "test_utils")] pub mod test_utils; /// The core tree data structure. -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod tree; /// Errors -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod error; /// Estimated costs -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod estimated_costs; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod visualize; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use ed; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use error::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use tree::{ BatchEntry, Link, MerkBatch, Op, PanicSource, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, HASH_LENGTH_U32, HASH_LENGTH_U32_X2, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use tree::{CryptoHash, TreeFeatureType}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use crate::merk::{ defaults::ROOT_KEY_KEY, prove::{ProofConstructionResult, ProofWithoutEncodingResult}, IsSumTree, 
KVIterator, Merk, MerkType, RootHashKeyAndSum, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use crate::visualize::VisualizeableMerk; diff --git a/merk/src/merk/defaults.rs b/merk/src/merk/defaults.rs index 87734e9b..5cda6552 100644 --- a/merk/src/merk/defaults.rs +++ b/merk/src/merk/defaults.rs @@ -28,10 +28,10 @@ //! Default values -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Root key key pub const ROOT_KEY_KEY: &[u8] = b"r"; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const MAX_UPDATE_VALUE_BASED_ON_COSTS_TIMES: u8 = 8; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const MAX_PREFIXED_KEY_SIZE: u64 = 288; diff --git a/merk/src/merk/options.rs b/merk/src/merk/options.rs index bfd78e0e..507354f8 100644 --- a/merk/src/merk/options.rs +++ b/merk/src/merk/options.rs @@ -28,14 +28,14 @@ //! Merk options -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Merk options pub struct MerkOptions { /// Base root storage is free? pub base_root_storage_is_free: bool, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for MerkOptions { fn default() -> Self { Self { diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 063a3575..7ba02c88 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -1,9 +1,9 @@ //! Chunk proofs mod binary_range; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod chunk; pub mod chunk_op; pub mod error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod util; diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index eb1c055b..5996e388 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -1,19 +1,19 @@ //! 
Proofs encoding -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use std::io::{Read, Write}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::Terminated; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use ed::{Decode, Encode, Error as EdError}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use super::{Node, Op}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::{error::Error, tree::HASH_LENGTH, TreeFeatureType}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Encode for Op { fn encode_into(&self, dest: &mut W) -> ed::Result<()> { match self { @@ -178,7 +178,7 @@ impl Encode for Op { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Decode for Op { fn decode(mut input: R) -> ed::Result { let variant: u8 = Decode::decode(&mut input)?; @@ -352,11 +352,11 @@ impl Decode for Op { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Terminated for Op {} impl Op { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] fn encode_into(&self, dest: &mut W) -> Result<(), Error> { Encode::encode_into(self, dest).map_err(|e| match e { EdError::UnexpectedByte(byte) => Error::ProofCreationError(format!( @@ -368,12 +368,12 @@ impl Op { }) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] fn encoding_length(&self) -> usize { Encode::encoding_length(self).unwrap() } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decode pub fn decode(bytes: &[u8]) -> Result { Decode::decode(bytes).map_err(|e| match e { @@ -387,7 +387,7 @@ impl Op { } } -#[cfg(feature = "full")] +#[cfg(feature = 
"minimal")] /// Encode into pub fn encode_into<'a, T: Iterator>(ops: T, output: &mut Vec) { for op in ops { @@ -395,14 +395,14 @@ pub fn encode_into<'a, T: Iterator>(ops: T, output: &mut Vec) } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Decoder pub struct Decoder<'a> { offset: usize, bytes: &'a [u8], } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'a> Decoder<'a> { /// New decoder pub const fn new(proof_bytes: &'a [u8]) -> Self { @@ -413,7 +413,7 @@ impl<'a> Decoder<'a> { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'a> Iterator for Decoder<'a> { type Item = Result; @@ -431,7 +431,7 @@ impl<'a> Iterator for Decoder<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use super::super::{Node, Op}; diff --git a/merk/src/proofs/mod.rs b/merk/src/proofs/mod.rs index 45f4b2e9..62ad2fdb 100644 --- a/merk/src/proofs/mod.rs +++ b/merk/src/proofs/mod.rs @@ -1,27 +1,27 @@ //! 
Merk proofs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod chunk; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod encoding; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod query; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod tree; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use encoding::encode_into; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use encoding::Decoder; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use query::Query; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use tree::Tree; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::{tree::CryptoHash, TreeFeatureType}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// A proof operator, executed to verify the data in a Merkle proof. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Op { @@ -54,7 +54,7 @@ pub enum Op { ChildInverted, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// A selected piece of data about a single tree node, to be contained in a /// `Push` operator in a proof. 
#[derive(Clone, Debug, PartialEq, Eq)] @@ -86,7 +86,7 @@ pub enum Node { use std::fmt; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for Node { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let node_string = match self { diff --git a/merk/src/proofs/query/common_path.rs b/merk/src/proofs/query/common_path.rs index b9c9453d..b660da0f 100644 --- a/merk/src/proofs/query/common_path.rs +++ b/merk/src/proofs/query/common_path.rs @@ -1,6 +1,6 @@ use crate::proofs::query::Path; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// CommonPathResult is the result of trying to find the common path between two /// paths #[derive(Debug, Default, Clone, PartialEq, Eq)] diff --git a/merk/src/proofs/query/insert.rs b/merk/src/proofs/query/insert.rs index 02b87866..8b76fd6e 100644 --- a/merk/src/proofs/query/insert.rs +++ b/merk/src/proofs/query/insert.rs @@ -2,7 +2,7 @@ use std::ops::{Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToIncl use crate::proofs::{query::query_item::QueryItem, Query}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Query { /// Adds an individual key to the query, so that its value (or its absence) /// in the tree will be included in the resulting proof. 
diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 9e741cea..8c3b5652 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -2,30 +2,30 @@ #![allow(unstable_name_collisions)] -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{ collections::{btree_map, btree_map::Iter, BTreeMap}, ops::{Bound, RangeBounds}, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::super::Node; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::error::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// `MapBuilder` allows a consumer to construct a `Map` by inserting the nodes /// contained in a proof, in key-order. pub struct MapBuilder(Map); -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for MapBuilder { fn default() -> Self { MapBuilder::new() } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl MapBuilder { /// Creates a new `MapBuilder` with an empty internal `Map`. pub fn new() -> Self { @@ -65,7 +65,7 @@ impl MapBuilder { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// `Map` stores data extracted from a proof (which has already been verified /// against a known root hash), and allows a consumer to access the data by /// looking up individual keys using the `get` method, or iterating over ranges @@ -76,7 +76,7 @@ pub struct Map { right_edge: bool, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Map { /// Gets the value for a single key, or `None` if the key was proven to not /// exist in the tree. If the proof does not include the data and also does @@ -120,7 +120,7 @@ impl Map { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Returns `None` for `Bound::Unbounded`, or the inner key value for /// `Bound::Included` and `Bound::Excluded`. 
fn bound_to_inner(bound: Bound) -> Option { @@ -130,7 +130,7 @@ fn bound_to_inner(bound: Bound) -> Option { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn bound_to_vec(bound: Bound<&&[u8]>) -> Bound> { match bound { Bound::Unbounded => Bound::Unbounded, @@ -139,7 +139,7 @@ fn bound_to_vec(bound: Bound<&&[u8]>) -> Bound> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn bounds_to_vec<'a, R: RangeBounds<&'a [u8]>>(bounds: R) -> impl RangeBounds> { ( bound_to_vec(bounds.start_bound()), @@ -147,7 +147,7 @@ fn bounds_to_vec<'a, R: RangeBounds<&'a [u8]>>(bounds: R) -> impl RangeBounds { prev_key: Option>, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> Range<'a> { /// Returns an error if the proof does not properly prove the end of the /// range. @@ -195,7 +195,7 @@ impl<'a> Range<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> Iterator for Range<'a> { type Item = Result<(&'a [u8], &'a [u8]), Error>; @@ -236,7 +236,7 @@ impl<'a> Iterator for Range<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use super::*; diff --git a/merk/src/proofs/query/merge.rs b/merk/src/proofs/query/merge.rs index f44dd66b..56af059f 100644 --- a/merk/src/proofs/query/merge.rs +++ b/merk/src/proofs/query/merge.rs @@ -238,7 +238,7 @@ impl SubqueryBranch { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Query { fn merge_default_subquerys_branch_subquery( &mut self, diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index b1543153..bd33d4b0 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -1,73 +1,73 @@ //! 
Query proofs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod map; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod common_path; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod insert; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod merge; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod query_item; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod verify; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::cmp::Ordering; use std::{collections::HashSet, fmt, ops::RangeFull}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use bincode::{ enc::write::Writer, error::{DecodeError, EncodeError}, BorrowDecode, Decode, Encode, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{cost_return_on_error, CostContext, CostResult, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use indexmap::IndexMap; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use map::*; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use query_item::intersect::QueryItemIntersectionResult; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use query_item::QueryItem; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use verify::ProofAbsenceLimit; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use verify::VerifyOptions; 
-#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use verify::{ProofVerificationResult, ProvedKeyOptionalValue, ProvedKeyValue}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use {super::Op, std::collections::LinkedList}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::Node; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::error::Error; use crate::proofs::hex_to_ascii; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::{Fetch, Link, RefWalker}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for a path. pub type Path = Vec>; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for a Key. pub type Key = Vec; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Type alias for path-key common pattern. pub type PathKey = (Path, Key); -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, Default, Clone, PartialEq, Encode, Decode)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] /// Subquery branch @@ -108,7 +108,7 @@ impl SubqueryBranch { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// `Query` represents one or more keys or ranges of keys, which can be used to /// resolve a proof which will include all the requested values. 
#[derive(Debug, Default, Clone, PartialEq)] @@ -124,7 +124,7 @@ pub struct Query { pub left_to_right: bool, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Encode for Query { fn encode(&self, encoder: &mut E) -> Result<(), EncodeError> { // Encode the items vector @@ -158,7 +158,7 @@ impl Encode for Query { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Decode for Query { fn decode(decoder: &mut D) -> Result { // Decode the items vector @@ -193,7 +193,7 @@ impl Decode for Query { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'de> BorrowDecode<'de> for Query { fn borrow_decode>( decoder: &mut D, @@ -230,7 +230,7 @@ impl<'de> BorrowDecode<'de> for Query { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for SubqueryBranch { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "SubqueryBranch {{ ")?; @@ -255,7 +255,7 @@ impl fmt::Display for SubqueryBranch { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for Query { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "Query {{")?; @@ -281,7 +281,7 @@ impl fmt::Display for Query { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Query { /// Creates a new query which contains no items. 
pub fn new() -> Self { @@ -650,7 +650,7 @@ impl Query { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl> From> for Query { fn from(other: Vec) -> Self { let items = other.into_iter().map(Into::into).collect(); @@ -666,14 +666,14 @@ impl> From> for Query { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl From for Vec { fn from(q: Query) -> Self { q.into_iter().collect() } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl IntoIterator for Query { type IntoIter = as IntoIterator>::IntoIter; type Item = QueryItem; @@ -683,11 +683,11 @@ impl IntoIterator for Query { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Link { /// Creates a `Node::Hash` from this link. Panics if the link is of variant /// `Link::Modified` since its hash has not yet been computed. - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] const fn to_hash_node(&self) -> Node { let hash = match self { Link::Reference { hash, .. } => hash, @@ -701,7 +701,7 @@ impl Link { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a, S> RefWalker<'a, S> where S: Fetch + Sized + Clone, @@ -758,7 +758,7 @@ where /// right edge, respectively. /// /// TODO: Generalize logic and get code to better represent logic - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub(crate) fn create_proof( &mut self, query: &[QueryItem], @@ -965,7 +965,7 @@ where /// Similar to `create_proof`. Recurses into the child on the given side and /// generates a proof for the queried keys. 
- #[cfg(feature = "full")] + #[cfg(feature = "minimal")] fn create_child_proof( &mut self, left: bool, @@ -1001,7 +1001,7 @@ where } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[allow(deprecated)] #[cfg(test)] mod test { diff --git a/merk/src/proofs/query/query_item/merge.rs b/merk/src/proofs/query/query_item/merge.rs index 9f36cb54..4799fc9a 100644 --- a/merk/src/proofs/query/query_item/merge.rs +++ b/merk/src/proofs/query/query_item/merge.rs @@ -5,7 +5,7 @@ use std::{ use crate::proofs::query::query_item::QueryItem; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl QueryItem { pub(crate) fn merge(&self, other: &Self) -> Self { let lower_unbounded = self.lower_unbounded() || other.lower_unbounded(); diff --git a/merk/src/proofs/query/query_item/mod.rs b/merk/src/proofs/query/query_item/mod.rs index d4f7fd8b..65c6640f 100644 --- a/merk/src/proofs/query/query_item/mod.rs +++ b/merk/src/proofs/query/query_item/mod.rs @@ -1,5 +1,5 @@ pub mod intersect; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod merge; use std::{ @@ -11,20 +11,20 @@ use std::{ }; use bincode::{enc::write::Writer, error::DecodeError, BorrowDecode, Decode, Encode}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{CostContext, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::RawIterator; #[cfg(feature = "serde")] use serde::de::VariantAccess; #[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::error::Error; use crate::proofs::hex_to_ascii; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// A `QueryItem` represents a key or range of keys to be included in a proof. 
#[derive(Clone, Debug)] pub enum QueryItem { @@ -183,7 +183,7 @@ impl<'de> Deserialize<'de> for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Encode for QueryItem { fn encode( &self, @@ -238,7 +238,7 @@ impl Encode for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Decode for QueryItem { fn decode(decoder: &mut D) -> Result { let variant_id = u8::decode(decoder)?; @@ -294,7 +294,7 @@ impl Decode for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl<'de> BorrowDecode<'de> for QueryItem { fn borrow_decode>( decoder: &mut D, @@ -352,7 +352,7 @@ impl<'de> BorrowDecode<'de> for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for QueryItem { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -396,7 +396,7 @@ impl fmt::Display for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Hash for QueryItem { fn hash(&self, state: &mut H) { self.enum_value().hash(state); @@ -405,7 +405,7 @@ impl Hash for QueryItem { } impl QueryItem { - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn processing_footprint(&self) -> u32 { match self { QueryItem::Key(key) => key.len() as u32, @@ -417,7 +417,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn lower_bound(&self) -> (Option<&[u8]>, bool) { match self { QueryItem::Key(key) => (Some(key.as_slice()), false), @@ -433,7 +433,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn 
lower_unbounded(&self) -> bool { match self { QueryItem::Key(_) => false, @@ -449,7 +449,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn upper_bound(&self) -> (Option<&[u8]>, bool) { match self { QueryItem::Key(key) => (Some(key.as_slice()), true), @@ -465,7 +465,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn upper_unbounded(&self) -> bool { match self { QueryItem::Key(_) => false, @@ -481,7 +481,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn contains(&self, key: &[u8]) -> bool { let (lower_bound, lower_bound_non_inclusive) = self.lower_bound(); let (upper_bound, upper_bound_inclusive) = self.upper_bound(); @@ -493,7 +493,7 @@ impl QueryItem { || (Some(key) == upper_bound && upper_bound_inclusive)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] fn enum_value(&self) -> u32 { match self { QueryItem::Key(_) => 0, @@ -509,7 +509,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] fn value_hash(&self, state: &mut H) { match self { QueryItem::Key(key) => key.hash(state), @@ -525,17 +525,17 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn is_key(&self) -> bool { matches!(self, QueryItem::Key(_)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn is_range(&self) -> bool { !matches!(self, QueryItem::Key(_)) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn is_unbounded_range(&self) -> bool { !matches!( self, @@ -543,7 
+543,7 @@ impl QueryItem { ) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn keys(&self) -> Result>, Error> { match self { QueryItem::Key(key) => Ok(vec![key.clone()]), @@ -593,7 +593,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn keys_consume(self) -> Result>, Error> { match self { QueryItem::Key(key) => Ok(vec![key]), @@ -643,7 +643,7 @@ impl QueryItem { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn seek_for_iter( &self, iter: &mut I, @@ -743,7 +743,7 @@ impl QueryItem { } } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn compare(a: &[u8], b: &[u8]) -> cmp::Ordering { for (ai, bi) in a.iter().zip(b.iter()) { match ai.cmp(bi) { @@ -756,7 +756,7 @@ impl QueryItem { a.len().cmp(&b.len()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn iter_is_valid_for_type( &self, iter: &I, @@ -833,30 +833,30 @@ impl QueryItem { is_valid.wrap_with_cost(cost) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn collides_with(&self, other: &Self) -> bool { self.intersect(other).in_both.is_some() } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl PartialEq for QueryItem { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl PartialEq<&[u8]> for QueryItem { fn eq(&self, other: &&[u8]) -> bool { matches!(self.partial_cmp(other), Some(Ordering::Equal)) } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Eq for QueryItem {} -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", 
feature = "verify"))] impl Ord for QueryItem { fn cmp(&self, other: &Self) -> Ordering { let self_as_range_set = self.to_range_set(); @@ -874,14 +874,14 @@ impl Ord for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl PartialOrd for QueryItem { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl PartialOrd<&[u8]> for QueryItem { fn partial_cmp(&self, other: &&[u8]) -> Option { let other = Self::Key(other.to_vec()); @@ -889,14 +889,14 @@ impl PartialOrd<&[u8]> for QueryItem { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl From> for QueryItem { fn from(key: Vec) -> Self { Self::Key(key) } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use crate::proofs::query::query_item::QueryItem; diff --git a/merk/src/proofs/query/verify.rs b/merk/src/proofs/query/verify.rs index 726145cf..de41006d 100644 --- a/merk/src/proofs/query/verify.rs +++ b/merk/src/proofs/query/verify.rs @@ -1,10 +1,10 @@ -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::collections::LinkedList; use std::fmt; use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::proofs::{ query::{Map, MapBuilder}, Op, @@ -16,10 +16,10 @@ use crate::{ CryptoHash as MerkHash, CryptoHash, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub type ProofAbsenceLimit = (LinkedList, (bool, bool), Option); -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Verify proof against expected hash #[deprecated] #[allow(unused)] @@ -65,7 +65,7 @@ impl Default for VerifyOptions { } impl Query { - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Verifies 
the encoded proof with the given query /// /// Every key in `keys` is checked to either have a key/value pair in the @@ -361,7 +361,7 @@ impl Query { .wrap_with_cost(cost) } - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] /// Verifies the encoded proof with the given query and expected hash pub fn verify_proof( &self, @@ -385,7 +385,7 @@ impl Query { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(PartialEq, Eq, Debug)] /// Proved key-value pub struct ProvedKeyOptionalValue { @@ -422,7 +422,7 @@ impl TryFrom for ProvedKeyValue { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for ProvedKeyOptionalValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let key_string = if self.key.len() == 1 && self.key[0] < b"0"[0] { @@ -444,7 +444,7 @@ impl fmt::Display for ProvedKeyOptionalValue { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(PartialEq, Eq, Debug)] /// Proved key-value pub struct ProvedKeyValue { @@ -456,7 +456,7 @@ pub struct ProvedKeyValue { pub proof: CryptoHash, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for ProvedKeyValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( @@ -469,7 +469,7 @@ impl fmt::Display for ProvedKeyValue { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(PartialEq, Eq, Debug)] /// Proof verification result pub struct ProofVerificationResult { @@ -479,7 +479,7 @@ pub struct ProofVerificationResult { pub limit: Option, } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl fmt::Display for ProofVerificationResult { fn fmt(&self, 
f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "ProofVerificationResult {{")?; diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index 16655a6d..4b2037fe 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -1,28 +1,28 @@ //! Tree proofs -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::fmt::Debug; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostContext, CostResult, CostsExt, OperationCost, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use super::{Node, Op}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::tree::{combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, NULL_HASH}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::{error::Error, tree::CryptoHash}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ proofs::chunk::chunk::{LEFT, RIGHT}, Link, TreeFeatureType::SummedMerkNode, }; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Contains a tree's child node and its hash. The hash can always be assumed to /// be up-to-date. #[derive(Debug)] @@ -34,7 +34,7 @@ pub struct Child { } impl Child { - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn as_link(&self) -> Link { let (key, sum) = match &self.tree.node { Node::KV(key, _) | Node::KVValueHash(key, ..) => (key.as_slice(), None), @@ -63,7 +63,7 @@ impl Child { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// A binary tree data structure used to represent a select subset of a tree /// when verifying Merkle proofs. 
#[derive(Debug)] @@ -80,7 +80,7 @@ pub struct Tree { pub child_heights: (usize, usize), } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl From for Tree { /// Creates a childless tree with the target node as the `node` field. fn from(node: Node) -> Self { @@ -94,7 +94,7 @@ impl From for Tree { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl PartialEq for Tree { /// Checks equality for the root hashes of the two trees. fn eq(&self, other: &Self) -> bool { @@ -104,7 +104,7 @@ impl PartialEq for Tree { impl Tree { /// Gets or computes the hash for this tree node. - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub fn hash(&self) -> CostContext { fn compute_hash(tree: &Tree, kv_hash: CryptoHash) -> CostContext { node_hash(&kv_hash, &tree.child_hash(true), &tree.child_hash(false)) @@ -138,14 +138,14 @@ impl Tree { /// Creates an iterator that yields the in-order traversal of the nodes at /// the given depth. - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn layer(&self, depth: usize) -> LayerIter { LayerIter::new(self, depth) } /// Consumes the `Tree` and does an in-order traversal over all the nodes in /// the tree, calling `visit_node` for each. - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn visit_nodes(mut self, visit_node: &mut F) { if let Some(child) = self.left.take() { child.tree.visit_nodes(visit_node); @@ -161,7 +161,7 @@ impl Tree { /// Does an in-order traversal over references to all the nodes in the tree, /// calling `visit_node` for each. 
- #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub fn visit_refs Result<(), Error>>( &self, visit_node: &mut F, @@ -178,7 +178,7 @@ impl Tree { Ok(()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] /// Does an in-order traversal over references to all the nodes in the tree, /// calling `visit_node` for each with the current traversal path. pub fn visit_refs_track_traversal_and_parent< @@ -215,7 +215,7 @@ impl Tree { } /// Returns an immutable reference to the child on the given side, if any. - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub const fn child(&self, left: bool) -> Option<&Child> { if left { self.left.as_ref() @@ -225,7 +225,7 @@ impl Tree { } /// Returns a mutable reference to the child on the given side, if any. - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub(crate) fn child_mut(&mut self, left: bool) -> &mut Option { if left { &mut self.left @@ -236,7 +236,7 @@ impl Tree { /// Attaches the child to the `Tree`'s given side. Panics if there is /// already a child attached to this side. - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] pub(crate) fn attach(&mut self, left: bool, child: Self) -> CostResult<(), Error> { let mut cost = OperationCost::default(); @@ -266,7 +266,7 @@ impl Tree { /// Returns the already-computed hash for this tree node's child on the /// given side, if any. If there is no child, returns the null hash /// (zero-filled). - #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] #[inline] const fn child_hash(&self, left: bool) -> CryptoHash { match self.child(left) { @@ -277,12 +277,12 @@ impl Tree { /// Consumes the tree node, calculates its hash, and returns a `Node::Hash` /// variant. 
- #[cfg(any(feature = "full", feature = "verify"))] + #[cfg(any(feature = "minimal", feature = "verify"))] fn into_hash(self) -> CostContext { self.hash().map(|hash| Node::Hash(hash).into()) } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub(crate) fn key(&self) -> &[u8] { match self.node { Node::KV(ref key, _) @@ -293,7 +293,7 @@ impl Tree { } } - #[cfg(feature = "full")] + #[cfg(feature = "minimal")] pub(crate) fn sum(&self) -> Option { match self.node { Node::KVValueHashFeatureType(.., feature_type) => match feature_type { @@ -305,7 +305,7 @@ impl Tree { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// `LayerIter` iterates over the nodes in a `Tree` at a given depth. Nodes are /// visited in order. pub struct LayerIter<'a> { @@ -313,7 +313,7 @@ pub struct LayerIter<'a> { depth: usize, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> LayerIter<'a> { /// Creates a new `LayerIter` that iterates over `tree` at the given depth. fn new(tree: &'a Tree, depth: usize) -> Self { @@ -327,7 +327,7 @@ impl<'a> LayerIter<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> Iterator for LayerIter<'a> { type Item = &'a Tree; @@ -349,7 +349,7 @@ impl<'a> Iterator for LayerIter<'a> { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Executes a proof by stepping through its operators, modifying the /// verification stack as it goes. The resulting stack item is returned. /// @@ -523,7 +523,7 @@ where Ok(tree).wrap_with_cost(cost) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use super::{super::*, Tree as ProofTree, *}; diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 69e5b555..9a059712 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -28,12 +28,12 @@ //! 
Temp merk test utils -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::ops::{Deref, DerefMut}; use grovedb_path::SubtreePath; use grovedb_storage::StorageBatch; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext}, Storage, @@ -41,10 +41,10 @@ use grovedb_storage::{ use grovedb_version::version::GroveVersion; use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::Merk; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Wraps a Merk instance and deletes it from disk it once it goes out of scope. pub struct TempMerk { storage: &'static TempStorage, @@ -52,7 +52,7 @@ pub struct TempMerk { merk: Merk>, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TempMerk { /// Opens a `TempMerk` at the given file path, creating a new one if it /// does not exist. @@ -102,7 +102,7 @@ impl TempMerk { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Drop for TempMerk { fn drop(&mut self) { unsafe { @@ -113,14 +113,14 @@ impl Drop for TempMerk { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Default for TempMerk { fn default() -> Self { Self::new(GroveVersion::latest()) } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Deref for TempMerk { type Target = Merk>; @@ -129,7 +129,7 @@ impl Deref for TempMerk { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl DerefMut for TempMerk { fn deref_mut(&mut self) -> &mut Merk> { &mut self.merk diff --git a/merk/src/tree/commit.rs b/merk/src/tree/commit.rs index 31b0df5c..6390d6e3 100644 --- a/merk/src/tree/commit.rs +++ b/merk/src/tree/commit.rs @@ -1,11 +1,11 @@ //! 
Merk tree commit -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::TreeNode; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::error::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// To be used when committing a tree (writing it to a store after applying the /// changes). pub trait Commit { @@ -26,12 +26,12 @@ pub trait Commit { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// A `Commit` implementation which does not write to a store and does not prune /// any nodes from the Tree. Useful when only keeping a tree in memory. pub struct NoopCommit {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Commit for NoopCommit { fn write( &mut self, diff --git a/merk/src/tree/debug.rs b/merk/src/tree/debug.rs index 3e88c60b..d6e3738a 100644 --- a/merk/src/tree/debug.rs +++ b/merk/src/tree/debug.rs @@ -1,40 +1,13 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - //! Merk tree debug use std::fmt::{Debug, Formatter, Result}; +#[cfg(feature = "colored")] use colored::Colorize; use super::{Link, TreeNode}; -#[cfg(feature = "full")] +#[cfg(all(feature = "minimal", feature = "colored"))] impl Debug for TreeNode { // TODO: unwraps should be results that bubble up fn fmt(&self, f: &mut Formatter) -> Result { @@ -55,7 +28,6 @@ impl Debug for TreeNode { } let depth = stack.len(); - if depth > 0 { // draw ancestor's vertical lines for (low, high) in stack.iter().take(depth - 1) { @@ -127,3 +99,80 @@ impl Debug for TreeNode { writeln!(f) } } + +#[cfg(all(feature = "minimal", not(feature = "colored")))] +impl Debug for TreeNode { + fn fmt(&self, f: &mut Formatter) -> Result { + fn traverse( + f: &mut Formatter, + cursor: &TreeNode, + stack: &mut Vec<(Vec, Vec)>, + left: bool, + ) { + if let Some(child_link) = cursor.link(true) { + stack.push((child_link.key().to_vec(), cursor.key().to_vec())); + if let Some(child_tree) = child_link.tree() { + traverse(f, child_tree, stack, true); + } else { + traverse_pruned(f, child_link, stack, true); + } + stack.pop(); + } + + let depth = stack.len(); + if depth > 0 { + for (low, high) in stack.iter().take(depth - 1) { + let draw_line = cursor.key() > low && cursor.key() < high; + write!(f, "{}", if draw_line { " │ " } else { " " }).unwrap(); + } + } + + let prefix = if depth == 0 { + "" + } else if left { + " ┌-" + } else { + " â””-" + }; + writeln!(f, "{}{:?}", prefix, cursor.key()).unwrap(); + + if let Some(child_link) = cursor.link(false) { + stack.push((cursor.key().to_vec(), child_link.key().to_vec())); + if let Some(child_tree) = child_link.tree() { + traverse(f, child_tree, stack, false); + } else { + 
traverse_pruned(f, child_link, stack, false); + } + stack.pop(); + } + } + + fn traverse_pruned( + f: &mut Formatter, + link: &Link, + stack: &mut [(Vec, Vec)], + left: bool, + ) { + let depth = stack.len(); + if depth > 0 { + for (low, high) in stack.iter().take(depth - 1) { + let draw_line = link.key() > low && link.key() < high; + write!(f, "{}", if draw_line { " │ " } else { " " }).unwrap(); + } + } + + let prefix = if depth == 0 { + "" + } else if left { + " ┌-" + } else { + " â””-" + }; + writeln!(f, "{}{:?}", prefix, link.key()).unwrap(); + } + + let mut stack = vec![]; + traverse(f, self, &mut stack, false); + writeln!(f) + } +} diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index cd10937d..1e1a3bea 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -1,26 +1,26 @@ //! Merk tree encoding -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::{Decode, Encode}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::TreeNode; use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ error::{Error, Error::EdError}, tree::TreeNodeInner, Error::StorageError, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNode { /// Decode given bytes and set as Tree fields. Set key to value of given /// key. 
@@ -67,7 +67,7 @@ impl TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNode { #[inline] /// Encode @@ -143,7 +143,7 @@ impl TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod tests { use super::{super::Link, *}; diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index eb026f56..cb74280a 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -2,21 +2,21 @@ #![cfg(tests)] -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{cell::RefCell, collections::BTreeMap}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use rand::prelude::*; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{test_utils::*, tree::*}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] const ITERATIONS: usize = 2_000; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] type Map = BTreeMap, Vec>; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[test] fn fuzz() { let mut rng = thread_rng(); @@ -27,19 +27,19 @@ fn fuzz() { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[test] fn fuzz_17391518417409062786() { fuzz_case(17391518417409062786); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[test] fn fuzz_396148930387069749() { fuzz_case(396148930387069749); } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn fuzz_case(seed: u64, using_sum_trees: bool) { let mut rng: SmallRng = SeedableRng::seed_from_u64(seed); let initial_size = (rng.gen::() % 10) + 1; @@ -72,7 +72,7 @@ fn fuzz_case(seed: u64, using_sum_trees: bool) { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] fn make_batch(maybe_tree: Option<&TreeNode>, size: u64, seed: u64) -> Vec { let rng: RefCell = RefCell::new(SeedableRng::seed_from_u64(seed)); let mut batch = Vec::with_capacity(size as usize); @@ -133,7 +133,7 @@ fn make_batch(maybe_tree: Option<&TreeNode>, size: u64, seed: u64) -> Vec, map: &Map) { if map.is_empty() { 
assert!(maybe_tree.is_none(), "expected tree to be None"); diff --git a/merk/src/tree/hash.rs b/merk/src/tree/hash.rs index e23566a9..8e6647d4 100644 --- a/merk/src/tree/hash.rs +++ b/merk/src/tree/hash.rs @@ -1,38 +1,38 @@ //! Merk tree hash -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_costs::{CostContext, CostsExt, OperationCost}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use integer_encoding::*; /// The length of a `Hash` (in bytes). -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub const HASH_LENGTH: usize = 32; /// 2x length of a `Hash` -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const HASH_LENGTH_X2: usize = 64; /// Length of a `Hash` as u32 -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const HASH_LENGTH_U32: u32 = 32; /// 2x length of a `Hash` as u32 -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const HASH_LENGTH_U32_X2: u32 = 64; /// Hash block size -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const HASH_BLOCK_SIZE: usize = 64; /// Hash block size as u32 -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub const HASH_BLOCK_SIZE_U32: u32 = 64; /// A zero-filled `Hash`. -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub const NULL_HASH: CryptoHash = [0; HASH_LENGTH]; /// A cryptographic hash digest. 
-#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub type CryptoHash = [u8; HASH_LENGTH]; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Hashes a value pub fn value_hash(value: &[u8]) -> CostContext { // TODO: make generic to allow other hashers @@ -53,7 +53,7 @@ pub fn value_hash(value: &[u8]) -> CostContext { }) } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Hashes a key/value pair. /// /// The result is Hash(key_len, key, Hash(value_len, value)) @@ -80,7 +80,7 @@ pub fn kv_hash(key: &[u8], value: &[u8]) -> CostContext { hash.wrap_with_cost(cost) } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Computes the kv hash given a kv digest pub fn kv_digest_to_kv_hash(key: &[u8], value_hash: &CryptoHash) -> CostContext { let mut hasher = blake3::Hasher::new(); @@ -102,7 +102,7 @@ pub fn kv_digest_to_kv_hash(key: &[u8], value_hash: &CryptoHash) -> CostContext< }) } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Hashes a node based on the hash of its key/value pair, the hash of its left /// child (if any), and the hash of its right child (if any). pub fn node_hash( @@ -128,7 +128,7 @@ pub fn node_hash( }) } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] /// Combines two hash values into one pub fn combine_hash(hash_one: &CryptoHash, hash_two: &CryptoHash) -> CostContext { let mut hasher = blake3::Hasher::new(); diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 03cca6ea..96c04e29 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -1,9 +1,9 @@ //! 
Merk tree iterator -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::TreeNode; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// An entry stored on an `Iter`'s stack, containing a reference to a `Tree`, /// and its traversal state. /// @@ -14,7 +14,7 @@ struct StackItem<'a> { traversed: (bool, bool, bool), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> StackItem<'a> { /// Creates a new `StackItem` for the given tree. The `traversed` state will /// be `false` since the children and self have not been visited yet, but @@ -39,14 +39,14 @@ impl<'a> StackItem<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// An iterator which yields the key/value pairs of the tree, in order, skipping /// any parts of the tree which are pruned (not currently retained in memory). pub struct Iter<'a> { stack: Vec>, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> Iter<'a> { /// Creates a new iterator for the given tree. pub fn new(tree: &'a TreeNode) -> Self { @@ -55,7 +55,7 @@ impl<'a> Iter<'a> { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> TreeNode { /// Creates an iterator which yields `(key, value)` tuples for all of the /// tree's nodes which are retained in memory (skipping pruned subtrees). @@ -64,7 +64,7 @@ impl<'a> TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a> Iterator for Iter<'a> { type Item = (Vec, Vec); diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index f4a0e224..7de707db 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -1,19 +1,19 @@ //! 
Merk tree key-values -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::io::{Read, Write}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::{Decode, Encode, Result, Terminated}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{CostContext, CostsExt, OperationCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::hash::{CryptoHash, HASH_LENGTH, NULL_HASH}; use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{ tree::{ hash::{combine_hash, kv_digest_to_kv_hash, value_hash, HASH_LENGTH_X2}, @@ -28,7 +28,7 @@ use crate::{ // field and value field. /// It is possible to predefine the value cost of specific types -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[derive(Clone, Debug, PartialEq, Eq)] pub enum ValueDefinedCostType { /// There is a predefined cost used to remove the root key from a sub tree @@ -40,7 +40,7 @@ pub enum ValueDefinedCostType { SpecializedValueDefinedCost(u32), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Contains a key/value pair, and the hash of the key/value pair. #[derive(Clone, Debug, PartialEq, Eq)] pub struct KV { @@ -54,7 +54,7 @@ pub struct KV { pub(super) value_hash: CryptoHash, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl KV { /// Creates a new `KV` with the given key and value and computes its hash. 
#[inline] @@ -456,7 +456,7 @@ impl KV { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] // TODO: Fix encoding and decoding of kv impl Encode for KV { #[inline] @@ -475,7 +475,7 @@ impl Encode for KV { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Decode for KV { #[inline] fn decode(input: R) -> Result { @@ -506,10 +506,10 @@ impl Decode for KV { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Terminated for KV {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use super::*; diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index f445dd11..6c372d87 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -1,21 +1,21 @@ //! Merk tree link -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::io::{Read, Write}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::{Decode, Encode, Result, Terminated}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::{hash::CryptoHash, TreeNode}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::HASH_LENGTH_U32; // TODO: optimize memory footprint -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Represents a reference to a child tree node. Links may or may not contain /// the child's `Tree` instance (storing its key if not). #[derive(Clone, Debug, PartialEq)] @@ -75,7 +75,7 @@ pub enum Link { }, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Link { /// Creates a `Link::Modified` from the given `Tree`. 
#[inline] @@ -295,7 +295,7 @@ impl Link { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Encode for Link { #[inline] fn encode_into(&self, out: &mut W) -> Result<()> { @@ -376,7 +376,7 @@ impl Encode for Link { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Link { #[inline] fn default_reference() -> Self { @@ -389,7 +389,7 @@ impl Link { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Decode for Link { #[inline] fn decode(input: R) -> Result { @@ -440,10 +440,10 @@ impl Decode for Link { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Terminated for Link {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[inline] fn read_u8(mut input: R) -> Result { let mut length = [0]; @@ -451,7 +451,7 @@ fn read_u8(mut input: R) -> Result { Ok(length[0]) } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use super::{ diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 91eebf52..e87865a2 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -1,38 +1,38 @@ //! 
Merk trees -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod commit; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod debug; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod encoding; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod fuzz_tests; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub mod hash; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod iter; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod just_in_time_value_update; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub mod kv; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod link; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod ops; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] mod tree_feature_type; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod walk; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::cmp::{max, Ordering}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use commit::{Commit, NoopCommit}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::{Decode, Encode, Terminated}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, storage_cost::{ @@ -42,40 +42,40 @@ use grovedb_costs::{ }, CostContext, CostResult, CostsExt, OperationCost, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use hash::{ combine_hash, kv_digest_to_kv_hash, kv_hash, node_hash, value_hash, CryptoHash, HASH_LENGTH, NULL_HASH, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use hash::{HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH_U32, HASH_LENGTH_U32_X2}; -#[cfg(feature = "full")] +#[cfg(feature 
= "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use kv::KV; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use link::Link; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use ops::{AuxMerkBatch, BatchEntry, MerkBatch, Op, PanicSource}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use tree_feature_type::TreeFeatureType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use walk::{Fetch, RefWalker, Walker}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::hash::HASH_LENGTH_X2; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{error::Error, Error::Overflow}; // TODO: remove need for `TreeInner`, and just use `Box` receiver for // relevant methods -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// The fields of the `Tree` type, stored on the heap. #[derive(Clone, Encode, Decode, Debug, PartialEq)] pub struct TreeNodeInner { @@ -84,7 +84,7 @@ pub struct TreeNodeInner { pub(crate) kv: KV, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNodeInner { /// Get the value as owned of the key value struct pub fn value_as_owned(self) -> Vec { @@ -107,10 +107,10 @@ impl TreeNodeInner { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Terminated for Box {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// A binary AVL tree data structure, with Merkle hashes. 
/// /// Trees' inner fields are stored on the heap so that nodes can recursively @@ -123,7 +123,7 @@ pub struct TreeNode { pub(crate) known_storage_cost: Option, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeNode { /// Creates a new `Tree` with the given key and value, and no children. /// @@ -1027,7 +1027,7 @@ impl TreeNode { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Convert side (left or right) to string pub const fn side_to_str(left: bool) -> &'static str { if left { @@ -1037,7 +1037,7 @@ pub const fn side_to_str(left: bool) -> &'static str { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 3e10b2c8..66fcb716 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -1,12 +1,12 @@ //! Merk tree ops -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use std::{ collections::{BTreeSet, LinkedList}, fmt, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, storage_cost::{ @@ -17,21 +17,21 @@ use grovedb_costs::{ CostContext, CostResult, CostsExt, OperationCost, }; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use integer_encoding::VarInt; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use Op::*; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::{Fetch, Link, TreeNode, Walker}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{error::Error, tree::tree_feature_type::TreeFeatureType, CryptoHash, HASH_LENGTH_U32}; use crate::{ merk::KeyUpdates, tree::kv::{ValueDefinedCostType, ValueDefinedCostType::SpecializedValueDefinedCost}, }; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// An operation to be applied to a key in the store. 
#[derive(PartialEq, Clone, Eq)] pub enum Op { @@ -70,7 +70,7 @@ pub enum Op { DeleteLayeredMaybeSpecialized, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl fmt::Debug for Op { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!( @@ -115,13 +115,13 @@ pub type MerkBatch = [BatchEntry]; /// and unique. pub type AuxMerkBatch = [AuxBatchEntry]; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// A source of data which panics when called. Useful when creating a store /// which always keeps the state in memory. #[derive(Clone)] pub struct PanicSource {} -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Fetch for PanicSource { fn fetch( &self, @@ -135,7 +135,7 @@ impl Fetch for PanicSource { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Walker where S: Fetch + Sized + Clone, @@ -1045,7 +1045,7 @@ where } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use super::*; diff --git a/merk/src/tree/tree_feature_type.rs b/merk/src/tree/tree_feature_type.rs index c47fb0d6..bb815dca 100644 --- a/merk/src/tree/tree_feature_type.rs +++ b/merk/src/tree/tree_feature_type.rs @@ -1,19 +1,19 @@ //! 
Merk tree feature type -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use std::io::{Read, Write}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use ed::Terminated; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use ed::{Decode, Encode}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::tree::tree_feature_type::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Copy, Clone, PartialEq, Eq, Debug)] /// Basic or summed pub enum TreeFeatureType { @@ -23,7 +23,7 @@ pub enum TreeFeatureType { SummedMerkNode(i64), } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl TreeFeatureType { #[inline] /// Get length of encoded SummedMerk @@ -50,7 +50,7 @@ impl TreeFeatureType { } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Terminated for TreeFeatureType {} impl Encode for TreeFeatureType { @@ -83,7 +83,7 @@ impl Encode for TreeFeatureType { } } -#[cfg(any(feature = "full", feature = "verify"))] +#[cfg(any(feature = "minimal", feature = "verify"))] impl Decode for TreeFeatureType { #[inline] fn decode(mut input: R) -> ed::Result { diff --git a/merk/src/tree/walk/fetch.rs b/merk/src/tree/walk/fetch.rs index 0ba657f2..5b43d9fe 100644 --- a/merk/src/tree/walk/fetch.rs +++ b/merk/src/tree/walk/fetch.rs @@ -1,17 +1,17 @@ //! 
Walk -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::CostResult; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::super::{Link, TreeNode}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::error::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// A source of data to be used by the tree when encountering a pruned node. /// This typically means fetching the tree node from a backing store by its key, /// but could also implement an in-memory cache for example. diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 4b67bb60..3a1998c9 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -1,29 +1,29 @@ //! Merk tree walk -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod fetch; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] mod ref_walker; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use fetch::Fetch; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_costs::{ cost_return_on_error_no_add, storage_cost::{removal::StorageRemovedBytes, StorageCost}, }; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] pub use ref_walker::RefWalker; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::{Link, TreeNode}; use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::{owner::Owner, tree::tree_feature_type::TreeFeatureType, CryptoHash, Error}; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Allows traversal of a `Tree`, fetching from the given source when traversing /// to a pruned node, detaching children as they are traversed. 
pub struct Walker @@ -34,7 +34,7 @@ where source: S, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl Walker where S: Fetch + Sized + Clone, @@ -386,7 +386,7 @@ where } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl From> for TreeNode where S: Fetch + Sized + Clone, @@ -396,7 +396,7 @@ where } } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] #[cfg(test)] mod test { use grovedb_costs::CostsExt; diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index 189bc7ee..7f363ba4 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -1,19 +1,19 @@ //! Merk reference walker -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; use grovedb_version::version::GroveVersion; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use super::{ super::{Link, TreeNode}, Fetch, }; use crate::tree::kv::ValueDefinedCostType; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] use crate::Error; -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] /// Allows read-only traversal of a `Tree`, fetching from the given source when /// traversing to a pruned node. The fetched nodes are then retained in memory /// until they (possibly) get pruned on the next commit. 
@@ -28,7 +28,7 @@ where source: S, } -#[cfg(feature = "full")] +#[cfg(feature = "minimal")] impl<'a, S> RefWalker<'a, S> where S: Fetch + Sized + Clone, diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 654ddb35..a493aa53 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -18,10 +18,10 @@ hex = "0.4.3" integer-encoding = { version = "4.0.0", optional = true } lazy_static = { version = "1.4.0", optional = true } num_cpus = { version = "1.16.0", optional = true } -rocksdb = { version = "0.22.0", optional = true } +rocksdb = { version = "0.23.0", optional = true } strum = { version = "0.26.2", features = ["derive"] } tempfile = { version = "3.10.1", optional = true } -thiserror = "1.0.59" +thiserror = "2.0.11" [features] rocksdb_storage = ["rocksdb", "num_cpus", "lazy_static", "tempfile", "blake3", "integer-encoding"] diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index 4530f1da..e7a0d2b8 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -10,4 +10,4 @@ repository = "https://github.com/dashpay/grovedb" [dependencies] hex = "0.4.3" -itertools = "0.12.1" +itertools = "0.14.0" From c24eea2073126dd484fd3f028f1cc2559fbd828b Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 14 Jan 2025 16:55:04 +0700 Subject: [PATCH 4/8] chore: update to version 2.2.0 (#351) --- costs/Cargo.toml | 2 +- grovedb-epoch-based-storage-flags/Cargo.toml | 4 ++-- grovedb-version/Cargo.toml | 2 +- grovedb/Cargo.toml | 18 +++++++++--------- grovedbg-types/Cargo.toml | 2 +- merk/Cargo.toml | 12 ++++++------ node-grove/Cargo.toml | 4 ++-- path/Cargo.toml | 2 +- storage/Cargo.toml | 8 ++++---- visualize/Cargo.toml | 2 +- 10 files changed, 28 insertions(+), 28 deletions(-) diff --git a/costs/Cargo.toml b/costs/Cargo.toml index aebaab67..db064e08 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-costs" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" description = "Costs extension crate for 
GroveDB" diff --git a/grovedb-epoch-based-storage-flags/Cargo.toml b/grovedb-epoch-based-storage-flags/Cargo.toml index 5e3cde71..01594792 100644 --- a/grovedb-epoch-based-storage-flags/Cargo.toml +++ b/grovedb-epoch-based-storage-flags/Cargo.toml @@ -2,13 +2,13 @@ name = "grovedb-epoch-based-storage-flags" authors = ["Samuel Westrich "] description = "Epoch based storage flags for GroveDB" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.1.0", path = "../costs" } +grovedb-costs = { version = "2.2.0", path = "../costs" } hex = { version = "0.4.3" } integer-encoding = { version = "4.0.0" } diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml index a46cb758..0c523e1c 100644 --- a/grovedb-version/Cargo.toml +++ b/grovedb-version/Cargo.toml @@ -2,7 +2,7 @@ name = "grovedb-version" authors = ["Samuel Westrich "] description = "Versioning library for Platform" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index de616e55..e1368278 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb" description = "Fully featured database using balanced hierarchical authenticated data structures" -version = "2.1.0" +version = "2.2.0" authors = ["Samuel Westrich ", "Wisdom Ogwu "] edition = "2021" license = "MIT" @@ -11,13 +11,13 @@ readme = "../README.md" documentation = "https://docs.rs/grovedb" [dependencies] -grovedb-costs = { version = "2.1.0", path = "../costs" , optional = true } -grovedbg-types = { version = "2.1.0", path = "../grovedbg-types", optional = true } -grovedb-merk = { version = "2.1.0", path = "../merk", optional = true, default-features = false } -grovedb-path = { version = "2.1.0", path = "../path" } -grovedb-storage = { version = "2.1.0", path = "../storage", 
optional = true } -grovedb-version = { version = "2.1.0", path = "../grovedb-version" } -grovedb-visualize = { version = "2.1.0", path = "../visualize", optional = true } +grovedb-costs = { version = "2.2.0", path = "../costs" , optional = true } +grovedbg-types = { version = "2.2.0", path = "../grovedbg-types", optional = true } +grovedb-merk = { version = "2.2.0", path = "../merk", optional = true, default-features = false } +grovedb-path = { version = "2.2.0", path = "../path" } +grovedb-storage = { version = "2.2.0", path = "../storage", optional = true } +grovedb-version = { version = "2.2.0", path = "../grovedb-version" } +grovedb-visualize = { version = "2.2.0", path = "../visualize", optional = true } axum = { version = "=0.7.5", features = ["macros"], optional = true } bincode = { version = "2.0.0-rc.3" } @@ -36,7 +36,7 @@ zip-extensions = { version = "0.8.1", optional = true } serde = { version = "1.0.210", features = ["derive"], optional = true } [dev-dependencies] -grovedb-epoch-based-storage-flags = { version = "2.1.0", path = "../grovedb-epoch-based-storage-flags" } +grovedb-epoch-based-storage-flags = { version = "2.2.0", path = "../grovedb-epoch-based-storage-flags" } criterion = "0.5.1" hex = "0.4.3" diff --git a/grovedbg-types/Cargo.toml b/grovedbg-types/Cargo.toml index 7f4395a5..be14fd54 100644 --- a/grovedbg-types/Cargo.toml +++ b/grovedbg-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedbg-types" -version = "2.1.0" +version = "2.2.0" edition = "2021" description = "Common type definitions for data exchange over GroveDBG protocol" authors = ["Evgeny Fomin "] diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 40510f6b..a720f127 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb-merk" description = "Merkle key/value store adapted for GroveDB" -version = "2.1.0" +version = "2.2.0" authors = ["Samuel Westrich ", "Wisdom Ogwu ", "Matt Bell "] edition = "2021" license = "MIT" @@ -11,11 +11,11 @@ 
readme = "README.md" documentation = "https://docs.rs/grovedb-merk" [dependencies] -grovedb-costs = { version = "2.1.0" , path = "../costs" } -grovedb-path = { version = "2.1.0", path = "../path" } -grovedb-storage = { version = "2.1.0", path = "../storage", optional = true } -grovedb-version = { version = "2.1.0", path = "../grovedb-version" } -grovedb-visualize = { version = "2.1.0", path = "../visualize" } +grovedb-costs = { version = "2.2.0" , path = "../costs" } +grovedb-path = { version = "2.2.0", path = "../path" } +grovedb-storage = { version = "2.2.0", path = "../storage", optional = true } +grovedb-version = { version = "2.2.0", path = "../grovedb-version" } +grovedb-visualize = { version = "2.2.0", path = "../visualize" } bincode = { version = "2.0.0-rc.3" } hex = "0.4.3" diff --git a/node-grove/Cargo.toml b/node-grove/Cargo.toml index a2876475..cc5624c3 100644 --- a/node-grove/Cargo.toml +++ b/node-grove/Cargo.toml @@ -10,8 +10,8 @@ exclude = ["index.node"] crate-type = ["cdylib"] [dependencies] -grovedb = { version = "2.1.0", path = "../grovedb", features = ["full", "estimated_costs"] } -grovedb-version = { version = "2.1.0", path = "../grovedb-version" } +grovedb = { version = "2.2.0", path = "../grovedb", features = ["full", "estimated_costs"] } +grovedb-version = { version = "2.2.0", path = "../grovedb-version" } [dependencies.neon] version = "0.10.1" diff --git a/path/Cargo.toml b/path/Cargo.toml index 92417664..1334da45 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-path" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" description = "Path extension crate for GroveDB" diff --git a/storage/Cargo.toml b/storage/Cargo.toml index a493aa53..683470de 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-storage" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" description = "Storage extension crate for GroveDB" @@ -9,9 
+9,9 @@ documentation = "https://docs.rs/grovedb-storage" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.1.0", path = "../costs" } -grovedb-path = { version = "2.1.0", path = "../path" } -grovedb-visualize = { version = "2.1.0", path = "../visualize" } +grovedb-costs = { version = "2.2.0", path = "../costs" } +grovedb-path = { version = "2.2.0", path = "../path" } +grovedb-visualize = { version = "2.2.0", path = "../visualize" } blake3 = { version = "1.5.1", optional = true } hex = "0.4.3" diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index e7a0d2b8..603fad71 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-visualize" -version = "2.1.0" +version = "2.2.0" edition = "2021" license = "MIT" description = "Debug prints extension crate for GroveDB" From fd0d0a12ee1450a364b83f534848b6b75d5a32e8 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 14 Jan 2025 17:56:47 +0700 Subject: [PATCH 5/8] fix: fix sub tree prefix and go to version 2.2.1 (#352) * fix: fix sub tree prefix and go to version 2.2.1 * fix: fix sub tree prefix and go to version 2.2.1 * fix * fix * fix --- costs/Cargo.toml | 2 +- grovedb-epoch-based-storage-flags/Cargo.toml | 4 ++-- grovedb-version/Cargo.toml | 2 +- grovedb/Cargo.toml | 18 +++++++++--------- grovedb/src/replication/state_sync_session.rs | 2 +- grovedbg-types/Cargo.toml | 2 +- merk/Cargo.toml | 12 ++++++------ node-grove/Cargo.toml | 4 ++-- path/Cargo.toml | 2 +- storage/Cargo.toml | 8 ++++---- storage/src/rocksdb_storage/storage.rs | 3 +-- storage/src/storage.rs | 3 +-- visualize/Cargo.toml | 2 +- 13 files changed, 31 insertions(+), 33 deletions(-) diff --git a/costs/Cargo.toml b/costs/Cargo.toml index db064e08..382404d6 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-costs" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" description = "Costs extension 
crate for GroveDB" diff --git a/grovedb-epoch-based-storage-flags/Cargo.toml b/grovedb-epoch-based-storage-flags/Cargo.toml index 01594792..2e32ec2b 100644 --- a/grovedb-epoch-based-storage-flags/Cargo.toml +++ b/grovedb-epoch-based-storage-flags/Cargo.toml @@ -2,13 +2,13 @@ name = "grovedb-epoch-based-storage-flags" authors = ["Samuel Westrich "] description = "Epoch based storage flags for GroveDB" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.2.0", path = "../costs" } +grovedb-costs = { version = "2.2.1", path = "../costs" } hex = { version = "0.4.3" } integer-encoding = { version = "4.0.0" } diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml index 0c523e1c..5cbd2dbe 100644 --- a/grovedb-version/Cargo.toml +++ b/grovedb-version/Cargo.toml @@ -2,7 +2,7 @@ name = "grovedb-version" authors = ["Samuel Westrich "] description = "Versioning library for Platform" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index e1368278..f977d0f4 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb" description = "Fully featured database using balanced hierarchical authenticated data structures" -version = "2.2.0" +version = "2.2.1" authors = ["Samuel Westrich ", "Wisdom Ogwu "] edition = "2021" license = "MIT" @@ -11,13 +11,13 @@ readme = "../README.md" documentation = "https://docs.rs/grovedb" [dependencies] -grovedb-costs = { version = "2.2.0", path = "../costs" , optional = true } -grovedbg-types = { version = "2.2.0", path = "../grovedbg-types", optional = true } -grovedb-merk = { version = "2.2.0", path = "../merk", optional = true, default-features = false } -grovedb-path = { version = "2.2.0", path = "../path" } -grovedb-storage = { version = "2.2.0", path = 
"../storage", optional = true } -grovedb-version = { version = "2.2.0", path = "../grovedb-version" } -grovedb-visualize = { version = "2.2.0", path = "../visualize", optional = true } +grovedb-costs = { version = "2.2.1", path = "../costs" , optional = true } +grovedbg-types = { version = "2.2.1", path = "../grovedbg-types", optional = true } +grovedb-merk = { version = "2.2.1", path = "../merk", optional = true, default-features = false } +grovedb-path = { version = "2.2.1", path = "../path" } +grovedb-storage = { version = "2.2.1", path = "../storage", optional = true } +grovedb-version = { version = "2.2.1", path = "../grovedb-version" } +grovedb-visualize = { version = "2.2.1", path = "../visualize", optional = true } axum = { version = "=0.7.5", features = ["macros"], optional = true } bincode = { version = "2.0.0-rc.3" } @@ -36,7 +36,7 @@ zip-extensions = { version = "0.8.1", optional = true } serde = { version = "1.0.210", features = ["derive"], optional = true } [dev-dependencies] -grovedb-epoch-based-storage-flags = { version = "2.2.0", path = "../grovedb-epoch-based-storage-flags" } +grovedb-epoch-based-storage-flags = { version = "2.2.1", path = "../grovedb-epoch-based-storage-flags" } criterion = "0.5.1" hex = "0.4.3" diff --git a/grovedb/src/replication/state_sync_session.rs b/grovedb/src/replication/state_sync_session.rs index 59d93316..1ce41c4b 100644 --- a/grovedb/src/replication/state_sync_session.rs +++ b/grovedb/src/replication/state_sync_session.rs @@ -22,7 +22,7 @@ use super::{ }; use crate::{replication, Element, Error, GroveDb, Transaction}; -pub(crate) type SubtreePrefix = [u8; blake3::OUT_LEN]; +pub(crate) type SubtreePrefix = [u8; 32]; /// Struct governing the state synchronization of one subtree. 
struct SubtreeStateSyncInfo<'db> { diff --git a/grovedbg-types/Cargo.toml b/grovedbg-types/Cargo.toml index be14fd54..7c5eb549 100644 --- a/grovedbg-types/Cargo.toml +++ b/grovedbg-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedbg-types" -version = "2.2.0" +version = "2.2.1" edition = "2021" description = "Common type definitions for data exchange over GroveDBG protocol" authors = ["Evgeny Fomin "] diff --git a/merk/Cargo.toml b/merk/Cargo.toml index a720f127..903c61d7 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb-merk" description = "Merkle key/value store adapted for GroveDB" -version = "2.2.0" +version = "2.2.1" authors = ["Samuel Westrich ", "Wisdom Ogwu ", "Matt Bell "] edition = "2021" license = "MIT" @@ -11,11 +11,11 @@ readme = "README.md" documentation = "https://docs.rs/grovedb-merk" [dependencies] -grovedb-costs = { version = "2.2.0" , path = "../costs" } -grovedb-path = { version = "2.2.0", path = "../path" } -grovedb-storage = { version = "2.2.0", path = "../storage", optional = true } -grovedb-version = { version = "2.2.0", path = "../grovedb-version" } -grovedb-visualize = { version = "2.2.0", path = "../visualize" } +grovedb-costs = { version = "2.2.1" , path = "../costs" } +grovedb-path = { version = "2.2.1", path = "../path" } +grovedb-storage = { version = "2.2.1", path = "../storage", optional = true } +grovedb-version = { version = "2.2.1", path = "../grovedb-version" } +grovedb-visualize = { version = "2.2.1", path = "../visualize" } bincode = { version = "2.0.0-rc.3" } hex = "0.4.3" diff --git a/node-grove/Cargo.toml b/node-grove/Cargo.toml index cc5624c3..bd91146b 100644 --- a/node-grove/Cargo.toml +++ b/node-grove/Cargo.toml @@ -10,8 +10,8 @@ exclude = ["index.node"] crate-type = ["cdylib"] [dependencies] -grovedb = { version = "2.2.0", path = "../grovedb", features = ["full", "estimated_costs"] } -grovedb-version = { version = "2.2.0", path = "../grovedb-version" } +grovedb = { 
version = "2.2.1", path = "../grovedb", features = ["full", "estimated_costs"] } +grovedb-version = { version = "2.2.1", path = "../grovedb-version" } [dependencies.neon] version = "0.10.1" diff --git a/path/Cargo.toml b/path/Cargo.toml index 1334da45..f5f89be7 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-path" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" description = "Path extension crate for GroveDB" diff --git a/storage/Cargo.toml b/storage/Cargo.toml index 683470de..e2db3bb6 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-storage" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" description = "Storage extension crate for GroveDB" @@ -9,9 +9,9 @@ documentation = "https://docs.rs/grovedb-storage" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.2.0", path = "../costs" } -grovedb-path = { version = "2.2.0", path = "../path" } -grovedb-visualize = { version = "2.2.0", path = "../visualize" } +grovedb-costs = { version = "2.2.1", path = "../costs" } +grovedb-path = { version = "2.2.1", path = "../path" } +grovedb-visualize = { version = "2.2.1", path = "../visualize" } blake3 = { version = "1.5.1", optional = true } hex = "0.4.3" diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index 44510694..f43c05e2 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -57,8 +57,7 @@ use crate::{ }; const BLAKE_BLOCK_LEN: usize = 64; - -pub type SubtreePrefix = [u8; blake3::OUT_LEN]; +pub type SubtreePrefix = [u8; 32]; fn blake_block_count(len: usize) -> usize { if len == 0 { diff --git a/storage/src/storage.rs b/storage/src/storage.rs index 2795cfc2..196507a6 100644 --- a/storage/src/storage.rs +++ b/storage/src/storage.rs @@ -42,8 +42,7 @@ use grovedb_path::SubtreePath; use 
grovedb_visualize::visualize_to_vec; use crate::{worst_case_costs::WorstKeyLength, Error}; - -pub type SubtreePrefix = [u8; blake3::OUT_LEN]; +pub type SubtreePrefix = [u8; 32]; /// Top-level storage_cost abstraction. /// Should be able to hold storage_cost connection and to start transaction when diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index 603fad71..233341a2 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-visualize" -version = "2.2.0" +version = "2.2.1" edition = "2021" license = "MIT" description = "Debug prints extension crate for GroveDB" From d29da251c578e3723bca95a06677f5f4404d390a Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Thu, 16 Jan 2025 16:07:48 +0700 Subject: [PATCH 6/8] feat: big sum trees and count trees (#348) * aggregate sum tree types * added count sum tree * added big sum tree --- costs/Cargo.toml | 2 +- costs/src/lib.rs | 35 +- grovedb-epoch-based-storage-flags/Cargo.toml | 4 +- grovedb-version/Cargo.toml | 2 +- grovedb-version/src/lib.rs | 17 + grovedb-version/src/version/merk_versions.rs | 12 +- grovedb-version/src/version/mod.rs | 11 +- grovedb-version/src/version/v1.rs | 9 +- grovedb-version/src/version/v2.rs | 193 ++ grovedb/Cargo.toml | 21 +- grovedb/src/batch/batch_structure.rs | 15 +- .../estimated_costs/average_case_costs.rs | 118 +- .../batch/estimated_costs/worst_case_costs.rs | 67 +- .../batch/just_in_time_reference_update.rs | 15 +- grovedb/src/batch/mod.rs | 303 +- .../src/batch/single_deletion_cost_tests.rs | 11 +- .../single_sum_item_deletion_cost_tests.rs | 5 +- grovedb/src/debugger.rs | 63 +- grovedb/src/element/constructor.rs | 114 + grovedb/src/element/delete.rs | 80 +- grovedb/src/element/get.rs | 164 +- grovedb/src/element/helpers.rs | 237 +- grovedb/src/element/insert.rs | 41 +- grovedb/src/element/mod.rs | 96 +- grovedb/src/element/query.rs | 7 + grovedb/src/error.rs | 46 + .../src/estimated_costs/average_case_costs.rs | 258 +- 
.../src/estimated_costs/worst_case_costs.rs | 98 +- grovedb/src/lib.rs | 187 +- grovedb/src/operations/delete/average_case.rs | 19 +- .../src/operations/delete/delete_up_tree.rs | 9 +- grovedb/src/operations/delete/mod.rs | 29 +- grovedb/src/operations/delete/worst_case.rs | 32 +- grovedb/src/operations/get/average_case.rs | 30 +- grovedb/src/operations/get/mod.rs | 22 +- grovedb/src/operations/get/query.rs | 58 +- grovedb/src/operations/get/worst_case.rs | 14 +- grovedb/src/operations/insert/mod.rs | 91 +- grovedb/src/operations/is_empty_tree.rs | 1 + grovedb/src/operations/proof/generate.rs | 5 +- grovedb/src/operations/proof/verify.rs | 9 +- grovedb/src/replication.rs | 27 +- grovedb/src/replication/state_sync_session.rs | 15 +- grovedb/src/tests/count_sum_tree_tests.rs | 556 ++++ grovedb/src/tests/count_tree_tests.rs | 852 ++++++ grovedb/src/tests/mod.rs | 12 +- grovedb/src/tests/sum_tree_tests.rs | 2425 +++++++++++------ grovedb/src/util.rs | 116 +- grovedb/src/visualize.rs | 33 + grovedbg-types/Cargo.toml | 2 +- grovedbg-types/src/lib.rs | 25 + merk/Cargo.toml | 13 +- merk/src/error.rs | 6 + .../src/estimated_costs/average_case_costs.rs | 454 ++- merk/src/estimated_costs/mod.rs | 55 +- merk/src/estimated_costs/worst_case_costs.rs | 20 +- merk/src/lib.rs | 8 +- merk/src/merk/apply.rs | 5 +- merk/src/merk/committer.rs | 3 +- merk/src/merk/mod.rs | 118 +- merk/src/merk/open.rs | 30 +- merk/src/merk/restore.rs | 33 +- merk/src/merk/source.rs | 7 +- merk/src/proofs/tree.rs | 34 +- merk/src/test_utils/mod.rs | 21 +- merk/src/test_utils/temp_merk.rs | 6 +- merk/src/tree/encoding.rs | 13 +- merk/src/tree/kv.rs | 56 +- merk/src/tree/link.rs | 267 +- merk/src/tree/mod.rs | 149 +- merk/src/tree/ops.rs | 2 +- merk/src/tree/tree_feature_type.rs | 173 +- merk/src/tree/walk/mod.rs | 4 +- merk/src/tree_type.rs | 81 + node-grove/Cargo.toml | 4 +- node-grove/src/converter.rs | 6 + path/Cargo.toml | 3 +- path/src/subtree_path.rs | 50 +- path/src/util/compact_bytes.rs | 2 +- 
storage/Cargo.toml | 8 +- visualize/Cargo.toml | 2 +- 81 files changed, 6328 insertions(+), 1918 deletions(-) create mode 100644 grovedb-version/src/version/v2.rs create mode 100644 grovedb/src/tests/count_sum_tree_tests.rs create mode 100644 grovedb/src/tests/count_tree_tests.rs create mode 100644 merk/src/tree_type.rs diff --git a/costs/Cargo.toml b/costs/Cargo.toml index 382404d6..a98dc02d 100644 --- a/costs/Cargo.toml +++ b/costs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-costs" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" description = "Costs extension crate for GroveDB" diff --git a/costs/src/lib.rs b/costs/src/lib.rs index c3d85530..7e576dc1 100644 --- a/costs/src/lib.rs +++ b/costs/src/lib.rs @@ -72,9 +72,29 @@ pub type ChildrenSizesWithValue = Option<( Option<(ChildKeyLength, ChildSumLength)>, )>; +/// The tree cost type +pub enum TreeCostType { + /// This is for sum trees and count trees + TreeFeatureUsesVarIntCostAs8Bytes, + /// This is for count sum trees + TreeFeatureUsesTwoVarIntsCostAs16Bytes, + /// This is for big sum trees + TreeFeatureUses16Bytes, +} + +impl TreeCostType { + fn cost_size(&self) -> u32 { + match self { + TreeCostType::TreeFeatureUsesVarIntCostAs8Bytes => 8, + TreeCostType::TreeFeatureUsesTwoVarIntsCostAs16Bytes => 16, + TreeCostType::TreeFeatureUses16Bytes => 16, + } + } +} + /// Children sizes starting with if we are in a sum tree pub type ChildrenSizesWithIsSumTree = Option<( - Option, + Option<(TreeCostType, FeatureSumLength)>, Option<(ChildKeyLength, ChildSumLength)>, Option<(ChildKeyLength, ChildSumLength)>, )>; @@ -199,10 +219,14 @@ impl OperationCost { paid_value_len -= right_child_sum_len; } - if let Some(sum_tree_len) = in_sum_tree { + let sum_tree_node_size = if let Some((tree_cost_type, sum_tree_len)) = in_sum_tree { + let cost_size = tree_cost_type.cost_size(); paid_value_len -= sum_tree_len; - paid_value_len += 8; - } + paid_value_len += cost_size; + cost_size + } else { + 0 + }; // 
This is the moment we need to add the required space (after removing // children) but before adding the parent to child hook @@ -210,9 +234,6 @@ impl OperationCost { // Now we are the parent to child hook - // we need to add the sum tree node size - let sum_tree_node_size = if in_sum_tree.is_some() { 8 } else { 0 }; - // We need to add the cost of a parent // key_len has a hash length already in it from the key prefix // So we need to remove it and then add a hash length diff --git a/grovedb-epoch-based-storage-flags/Cargo.toml b/grovedb-epoch-based-storage-flags/Cargo.toml index 2e32ec2b..83df1463 100644 --- a/grovedb-epoch-based-storage-flags/Cargo.toml +++ b/grovedb-epoch-based-storage-flags/Cargo.toml @@ -2,13 +2,13 @@ name = "grovedb-epoch-based-storage-flags" authors = ["Samuel Westrich "] description = "Epoch based storage flags for GroveDB" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.2.1", path = "../costs" } +grovedb-costs = { version = "3.0.0", path = "../costs" } hex = { version = "0.4.3" } integer-encoding = { version = "4.0.0" } diff --git a/grovedb-version/Cargo.toml b/grovedb-version/Cargo.toml index 5cbd2dbe..d1e6b08b 100644 --- a/grovedb-version/Cargo.toml +++ b/grovedb-version/Cargo.toml @@ -2,7 +2,7 @@ name = "grovedb-version" authors = ["Samuel Westrich "] description = "Versioning library for Platform" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" repository = "https://github.com/dashpay/grovedb" diff --git a/grovedb-version/src/lib.rs b/grovedb-version/src/lib.rs index 48b80a52..f66019d4 100644 --- a/grovedb-version/src/lib.rs +++ b/grovedb-version/src/lib.rs @@ -34,6 +34,23 @@ macro_rules! check_grovedb_v0 { }}; } +#[macro_export] +macro_rules! 
check_grovedb_v0_or_v1 { + ($method:expr, $version:expr) => {{ + const EXPECTED_VERSION_V0: u16 = 0; + const EXPECTED_VERSION_V1: u16 = 1; + if $version != EXPECTED_VERSION_V0 && $version != EXPECTED_VERSION_V1 { + return Err(GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION_V0, EXPECTED_VERSION_V1], + received: $version, + } + .into()); + } + $version + }}; +} + #[macro_export] macro_rules! check_merk_v0_with_cost { ($method:expr, $version:expr) => {{ diff --git a/grovedb-version/src/version/merk_versions.rs b/grovedb-version/src/version/merk_versions.rs index fac25f91..d0d122da 100644 --- a/grovedb-version/src/version/merk_versions.rs +++ b/grovedb-version/src/version/merk_versions.rs @@ -1,2 +1,12 @@ +use versioned_feature_core::FeatureVersion; + #[derive(Clone, Debug, Default)] -pub struct MerkVersions {} +pub struct MerkVersions { + pub average_case_costs: MerkAverageCaseCostsVersions, +} + +#[derive(Clone, Debug, Default)] +pub struct MerkAverageCaseCostsVersions { + pub add_average_case_merk_propagate: FeatureVersion, + pub sum_tree_estimated_size: FeatureVersion, +} diff --git a/grovedb-version/src/version/mod.rs b/grovedb-version/src/version/mod.rs index 06ac4e12..d795176c 100644 --- a/grovedb-version/src/version/mod.rs +++ b/grovedb-version/src/version/mod.rs @@ -1,11 +1,12 @@ pub mod grovedb_versions; pub mod merk_versions; pub mod v1; +pub mod v2; pub use versioned_feature_core::*; use crate::version::{ - grovedb_versions::GroveDBVersions, merk_versions::MerkVersions, v1::GROVE_V1, + grovedb_versions::GroveDBVersions, merk_versions::MerkVersions, v1::GROVE_V1, v2::GROVE_V2, }; #[derive(Clone, Debug, Default)] @@ -16,6 +17,12 @@ pub struct GroveVersion { } impl GroveVersion { + pub fn first<'a>() -> &'a Self { + GROVE_VERSIONS + .first() + .expect("expected to have a platform version") + } + pub fn latest<'a>() -> &'a Self { GROVE_VERSIONS .last() @@ -23,4 +30,4 @@ impl GroveVersion { } } 
-pub const GROVE_VERSIONS: &[GroveVersion] = &[GROVE_V1]; +pub const GROVE_VERSIONS: &[GroveVersion] = &[GROVE_V1, GROVE_V2]; diff --git a/grovedb-version/src/version/v1.rs b/grovedb-version/src/version/v1.rs index 97cfb38b..5bf58180 100644 --- a/grovedb-version/src/version/v1.rs +++ b/grovedb-version/src/version/v1.rs @@ -8,7 +8,7 @@ use crate::version::{ GroveDBOperationsWorstCaseVersions, GroveDBPathQueryMethodVersions, GroveDBReplicationVersions, GroveDBVersions, }, - merk_versions::MerkVersions, + merk_versions::{MerkAverageCaseCostsVersions, MerkVersions}, GroveVersion, }; @@ -184,5 +184,10 @@ pub const GROVE_V1: GroveVersion = GroveVersion { apply_chunk: 0, }, }, - merk_versions: MerkVersions {}, + merk_versions: MerkVersions { + average_case_costs: MerkAverageCaseCostsVersions { + add_average_case_merk_propagate: 0, + sum_tree_estimated_size: 0, + }, + }, }; diff --git a/grovedb-version/src/version/v2.rs b/grovedb-version/src/version/v2.rs new file mode 100644 index 00000000..3591ba1a --- /dev/null +++ b/grovedb-version/src/version/v2.rs @@ -0,0 +1,193 @@ +use crate::version::{ + grovedb_versions::{ + GroveDBApplyBatchVersions, GroveDBElementMethodVersions, + GroveDBOperationsAverageCaseVersions, GroveDBOperationsDeleteUpTreeVersions, + GroveDBOperationsDeleteVersions, GroveDBOperationsGetVersions, + GroveDBOperationsInsertVersions, GroveDBOperationsProofVersions, + GroveDBOperationsQueryVersions, GroveDBOperationsVersions, + GroveDBOperationsWorstCaseVersions, GroveDBPathQueryMethodVersions, + GroveDBReplicationVersions, GroveDBVersions, + }, + merk_versions::{MerkAverageCaseCostsVersions, MerkVersions}, + GroveVersion, +}; + +pub const GROVE_V2: GroveVersion = GroveVersion { + protocol_version: 1, + grovedb_versions: GroveDBVersions { + apply_batch: GroveDBApplyBatchVersions { + apply_batch_structure: 0, + apply_body: 0, + continue_partial_apply_body: 0, + apply_operations_without_batching: 0, + apply_batch: 0, + apply_partial_batch: 0, + 
open_batch_transactional_merk_at_path: 0, + open_batch_merk_at_path: 0, + apply_batch_with_element_flags_update: 0, + apply_partial_batch_with_element_flags_update: 0, + estimated_case_operations_for_batch: 0, + }, + element: GroveDBElementMethodVersions { + delete: 0, + delete_with_sectioned_removal_bytes: 0, + delete_into_batch_operations: 0, + element_at_key_already_exists: 0, + get: 0, + get_optional: 0, + get_from_storage: 0, + get_optional_from_storage: 1, + get_with_absolute_refs: 0, + get_value_hash: 0, + get_specialized_cost: 0, + value_defined_cost: 0, + value_defined_cost_for_serialized_value: 0, + specialized_costs_for_key_value: 0, + required_item_space: 0, + insert: 0, + insert_into_batch_operations: 0, + insert_if_not_exists: 0, + insert_if_not_exists_into_batch_operations: 0, + insert_if_changed_value: 0, + insert_if_changed_value_into_batch_operations: 0, + insert_reference: 0, + insert_reference_into_batch_operations: 0, + insert_subtree: 0, + insert_subtree_into_batch_operations: 0, + get_query: 0, + get_query_values: 0, + get_query_apply_function: 0, + get_path_query: 0, + get_sized_query: 0, + path_query_push: 0, + query_item: 0, + basic_push: 0, + serialize: 0, + serialized_size: 0, + deserialize: 0, + }, + operations: GroveDBOperationsVersions { + get: GroveDBOperationsGetVersions { + get: 0, + get_caching_optional: 0, + follow_reference: 0, + get_raw: 0, + get_raw_caching_optional: 0, + get_raw_optional: 0, + get_raw_optional_caching_optional: 0, + has_raw: 0, + check_subtree_exists_invalid_path: 0, + average_case_for_has_raw: 0, + average_case_for_has_raw_tree: 0, + average_case_for_get_raw: 0, + average_case_for_get: 0, + average_case_for_get_tree: 0, + worst_case_for_has_raw: 0, + worst_case_for_get_raw: 0, + worst_case_for_get: 0, + is_empty_tree: 0, + }, + insert: GroveDBOperationsInsertVersions { + insert: 0, + insert_on_transaction: 0, + insert_without_transaction: 0, + add_element_on_transaction: 0, + add_element_without_transaction: 
0, + insert_if_not_exists: 0, + insert_if_not_exists_return_existing_element: 0, + insert_if_changed_value: 0, + }, + delete: GroveDBOperationsDeleteVersions { + delete: 0, + clear_subtree: 0, + delete_with_sectional_storage_function: 0, + delete_if_empty_tree: 0, + delete_if_empty_tree_with_sectional_storage_function: 0, + delete_operation_for_delete_internal: 0, + delete_internal_on_transaction: 0, + delete_internal_without_transaction: 0, + average_case_delete_operation_for_delete: 0, + worst_case_delete_operation_for_delete: 0, + }, + delete_up_tree: GroveDBOperationsDeleteUpTreeVersions { + delete_up_tree_while_empty: 0, + delete_up_tree_while_empty_with_sectional_storage: 0, + delete_operations_for_delete_up_tree_while_empty: 0, + add_delete_operations_for_delete_up_tree_while_empty: 0, + average_case_delete_operations_for_delete_up_tree_while_empty: 0, + worst_case_delete_operations_for_delete_up_tree_while_empty: 0, + }, + query: GroveDBOperationsQueryVersions { + query_encoded_many: 0, + query_many_raw: 0, + get_proved_path_query: 0, + query: 0, + query_item_value: 0, + query_item_value_or_sum: 0, + query_sums: 0, + query_raw: 0, + query_keys_optional: 0, + query_raw_keys_optional: 0, + follow_element: 0, + }, + proof: GroveDBOperationsProofVersions { + prove_query: 0, + prove_query_many: 0, + verify_query_with_options: 0, + verify_query_raw: 0, + verify_layer_proof: 0, + verify_query: 0, + verify_subset_query: 0, + verify_query_with_absence_proof: 0, + verify_subset_query_with_absence_proof: 0, + verify_query_with_chained_path_queries: 0, + }, + average_case: GroveDBOperationsAverageCaseVersions { + add_average_case_get_merk_at_path: 0, + average_case_merk_replace_tree: 1, // changed + average_case_merk_insert_tree: 0, + average_case_merk_delete_tree: 0, + average_case_merk_insert_element: 0, + average_case_merk_replace_element: 0, + average_case_merk_patch_element: 0, + average_case_merk_delete_element: 0, + add_average_case_has_raw_cost: 0, + 
add_average_case_has_raw_tree_cost: 0, + add_average_case_get_raw_cost: 0, + add_average_case_get_raw_tree_cost: 0, + add_average_case_get_cost: 0, + }, + worst_case: GroveDBOperationsWorstCaseVersions { + add_worst_case_get_merk_at_path: 0, + worst_case_merk_replace_tree: 0, + worst_case_merk_insert_tree: 0, + worst_case_merk_delete_tree: 0, + worst_case_merk_insert_element: 0, + worst_case_merk_replace_element: 0, + worst_case_merk_patch_element: 0, + worst_case_merk_delete_element: 0, + add_worst_case_has_raw_cost: 0, + add_worst_case_get_raw_tree_cost: 0, + add_worst_case_get_raw_cost: 0, + add_worst_case_get_cost: 0, + }, + }, + path_query_methods: GroveDBPathQueryMethodVersions { + terminal_keys: 0, + merge: 0, + query_items_at_path: 0, + }, + replication: GroveDBReplicationVersions { + get_subtrees_metadata: 0, + fetch_chunk: 0, + start_snapshot_syncing: 0, + apply_chunk: 0, + }, + }, + merk_versions: MerkVersions { + average_case_costs: MerkAverageCaseCostsVersions { + add_average_case_merk_propagate: 1, // changed + sum_tree_estimated_size: 1, // changed + }, + }, +}; diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index f977d0f4..5a2d998e 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb" description = "Fully featured database using balanced hierarchical authenticated data structures" -version = "2.2.1" +version = "3.0.0" authors = ["Samuel Westrich ", "Wisdom Ogwu "] edition = "2021" license = "MIT" @@ -11,13 +11,13 @@ readme = "../README.md" documentation = "https://docs.rs/grovedb" [dependencies] -grovedb-costs = { version = "2.2.1", path = "../costs" , optional = true } -grovedbg-types = { version = "2.2.1", path = "../grovedbg-types", optional = true } -grovedb-merk = { version = "2.2.1", path = "../merk", optional = true, default-features = false } -grovedb-path = { version = "2.2.1", path = "../path" } -grovedb-storage = { version = "2.2.1", path = "../storage", optional = true } 
-grovedb-version = { version = "2.2.1", path = "../grovedb-version" } -grovedb-visualize = { version = "2.2.1", path = "../visualize", optional = true } +grovedb-costs = { version = "3.0.0", path = "../costs" , optional = true } +grovedbg-types = { version = "3.0.0", path = "../grovedbg-types", optional = true } +grovedb-merk = { version = "3.0.0", path = "../merk", optional = true, default-features = false } +grovedb-path = { version = "3.0.0", path = "../path" } +grovedb-storage = { version = "3.0.0", path = "../storage", optional = true } +grovedb-version = { version = "3.0.0", path = "../grovedb-version" } +grovedb-visualize = { version = "3.0.0", path = "../visualize", optional = true } axum = { version = "=0.7.5", features = ["macros"], optional = true } bincode = { version = "2.0.0-rc.3" } @@ -36,19 +36,20 @@ zip-extensions = { version = "0.8.1", optional = true } serde = { version = "1.0.210", features = ["derive"], optional = true } [dev-dependencies] -grovedb-epoch-based-storage-flags = { version = "2.2.1", path = "../grovedb-epoch-based-storage-flags" } +grovedb-epoch-based-storage-flags = { version = "3.0.0", path = "../grovedb-epoch-based-storage-flags" } criterion = "0.5.1" hex = "0.4.3" pretty_assertions = "1.4.0" rand = "0.8.5" +assert_matches = "1.5.0" [[bench]] name = "insertion_benchmark" harness = false [features] -default = ["full"] +default = ["full", "estimated_costs"] proof_debug = ["grovedb-merk/proof_debug"] serde = ["dep:serde", "grovedb-merk/serde", "indexmap/serde"] full = [ diff --git a/grovedb/src/batch/batch_structure.rs b/grovedb/src/batch/batch_structure.rs index 03694379..59e428c6 100644 --- a/grovedb/src/batch/batch_structure.rs +++ b/grovedb/src/batch/batch_structure.rs @@ -17,7 +17,7 @@ use intmap::IntMap; #[cfg(feature = "minimal")] use crate::{ batch::{key_info::KeyInfo, GroveOp, KeyInfoPath, QualifiedGroveDbOp, TreeCache}, - Element, ElementFlags, Error, + ElementFlags, Error, }; #[cfg(feature = "minimal")] @@ -124,17 
+124,14 @@ where | GroveOp::InsertOrReplace { element } | GroveOp::Replace { element } | GroveOp::Patch { element, .. } => { - if let Element::Tree(..) = element { - cost_return_on_error!(&mut cost, merk_tree_cache.insert(&op, false)); - } else if let Element::SumTree(..) = element { - cost_return_on_error!(&mut cost, merk_tree_cache.insert(&op, true)); + if let Some(tree_type) = element.tree_type() { + cost_return_on_error!(&mut cost, merk_tree_cache.insert(&op, tree_type)); } Ok(()) } - GroveOp::RefreshReference { .. } - | GroveOp::Delete - | GroveOp::DeleteTree - | GroveOp::DeleteSumTree => Ok(()), + GroveOp::RefreshReference { .. } | GroveOp::Delete | GroveOp::DeleteTree(_) => { + Ok(()) + } GroveOp::ReplaceTreeRootKey { .. } | GroveOp::InsertTreeWithRootHash { .. } => { Err(Error::InvalidBatchOperation( "replace and insert tree hash are internal operations only", diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 8d4c076a..310c5863 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -10,12 +10,11 @@ use std::{ use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -use grovedb_merk::RootHashKeyAndSum; #[cfg(feature = "minimal")] -use grovedb_merk::{ - estimated_costs::average_case_costs::{average_case_merk_propagate, EstimatedLayerInformation}, - IsSumTree, +use grovedb_merk::estimated_costs::average_case_costs::{ + average_case_merk_propagate, EstimatedLayerInformation, }; +use grovedb_merk::{tree::AggregateData, tree_type::TreeType, RootHashKeyAndAggregateData}; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::version::GroveVersion; @@ -44,7 +43,7 @@ impl GroveOp { propagate: bool, grove_version: &GroveVersion, ) -> CostResult<(), Error> { - let in_tree_using_sums = 
layer_element_estimates.is_sum_tree; + let in_tree_type = layer_element_estimates.tree_type; let propagate_if_input = || { if propagate { Some(layer_element_estimates) @@ -53,28 +52,32 @@ impl GroveOp { } }; match self { - GroveOp::ReplaceTreeRootKey { sum, .. } => GroveDb::average_case_merk_replace_tree( - key, - layer_element_estimates, - sum.is_some(), - propagate, - grove_version, - ), - GroveOp::InsertTreeWithRootHash { flags, sum, .. } => { - GroveDb::average_case_merk_insert_tree( + GroveOp::ReplaceTreeRootKey { aggregate_data, .. } => { + GroveDb::average_case_merk_replace_tree( key, - flags, - sum.is_some(), - in_tree_using_sums, - propagate_if_input(), + layer_element_estimates, + aggregate_data.parent_tree_type(), + propagate, grove_version, ) } + GroveOp::InsertTreeWithRootHash { + flags, + aggregate_data, + .. + } => GroveDb::average_case_merk_insert_tree( + key, + flags, + aggregate_data.parent_tree_type(), + in_tree_type, + propagate_if_input(), + grove_version, + ), GroveOp::InsertOrReplace { element } | GroveOp::InsertOnly { element } => { GroveDb::average_case_merk_insert_element( key, element, - in_tree_using_sums, + in_tree_type, propagate_if_input(), grove_version, ) @@ -91,14 +94,14 @@ impl GroveOp { *max_reference_hop, flags.clone(), ), - in_tree_using_sums, + in_tree_type, propagate_if_input(), grove_version, ), GroveOp::Replace { element } => GroveDb::average_case_merk_replace_element( key, element, - in_tree_using_sums, + in_tree_type, propagate_if_input(), grove_version, ), @@ -109,7 +112,7 @@ impl GroveOp { key, element, *change_in_bytes, - in_tree_using_sums, + in_tree_type, propagate_if_input(), grove_version, ), @@ -119,16 +122,9 @@ impl GroveOp { propagate, grove_version, ), - GroveOp::DeleteTree => GroveDb::average_case_merk_delete_tree( + GroveOp::DeleteTree(tree_type) => GroveDb::average_case_merk_delete_tree( key, - false, - layer_element_estimates, - propagate, - grove_version, - ), - GroveOp::DeleteSumTree => 
GroveDb::average_case_merk_delete_tree( - key, - true, + *tree_type, layer_element_estimates, propagate, grove_version, @@ -142,7 +138,7 @@ impl GroveOp { #[derive(Default)] pub(in crate::batch) struct AverageCaseTreeCacheKnownPaths { paths: HashMap, - cached_merks: HashMap, + cached_merks: HashMap, } #[cfg(feature = "minimal")] @@ -167,7 +163,7 @@ impl fmt::Debug for AverageCaseTreeCacheKnownPaths { #[cfg(feature = "minimal")] impl TreeCache for AverageCaseTreeCacheKnownPaths { - fn insert(&mut self, op: &QualifiedGroveDbOp, is_sum_tree: bool) -> CostResult<(), Error> { + fn insert(&mut self, op: &QualifiedGroveDbOp, tree_type: TreeType) -> CostResult<(), Error> { let mut average_case_cost = OperationCost::default(); let mut inserted_path = op.path.clone(); inserted_path.push(op.key.clone()); @@ -175,7 +171,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { // empty at this point. // There is however a hash call that creates the prefix average_case_cost.hash_node_calls += 1; - self.cached_merks.insert(inserted_path, is_sum_tree); + self.cached_merks.insert(inserted_path, tree_type); Ok(()).wrap_with_cost(average_case_cost) } @@ -192,7 +188,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { _flags_update: &mut G, _split_removal_bytes: &mut SR, grove_version: &GroveVersion, - ) -> CostResult { + ) -> CostResult { let mut cost = OperationCost::default(); let layer_element_estimates = cost_return_on_error_no_add!( @@ -238,12 +234,11 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { &mut cost, path, layer_should_be_empty, - layer_info.is_sum_tree, + layer_info.tree_type, grove_version, ) ); - self.cached_merks - .insert(path.clone(), layer_info.is_sum_tree); + self.cached_merks.insert(path.clone(), layer_info.tree_type); } for (key, op) in ops_at_path_by_key.into_iter() { @@ -255,9 +250,10 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { cost_return_on_error!( &mut cost, - 
average_case_merk_propagate(layer_element_estimates).map_err(Error::MerkError) + average_case_merk_propagate(layer_element_estimates, grove_version) + .map_err(Error::MerkError) ); - Ok(([0u8; 32], None, None)).wrap_with_cost(cost) + Ok(([0u8; 32], None, AggregateData::NoAggregateData)).wrap_with_cost(cost) } fn update_base_merk_root_key( @@ -279,12 +275,12 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { estimated_layer_info .estimated_layer_count .estimated_to_be_empty(), - estimated_layer_info.is_sum_tree, + estimated_layer_info.tree_type, grove_version ) ); self.cached_merks - .insert(base_path, estimated_layer_info.is_sum_tree); + .insert(base_path, estimated_layer_info.tree_type); } } Ok(()).wrap_with_cost(cost) @@ -300,11 +296,14 @@ mod tests { storage_cost::{removal::StorageRemovedBytes::NoStorageRemoval, StorageCost}, OperationCost, }; - use grovedb_merk::estimated_costs::average_case_costs::{ - EstimatedLayerCount::{ApproximateElements, EstimatedLevel, PotentiallyAtMaxElements}, - EstimatedLayerInformation, - EstimatedLayerSizes::{AllItems, AllSubtrees}, - EstimatedSumTrees::{NoSumTrees, SomeSumTrees}, + use grovedb_merk::{ + estimated_costs::average_case_costs::{ + EstimatedLayerCount::{ApproximateElements, EstimatedLevel, PotentiallyAtMaxElements}, + EstimatedLayerInformation, + EstimatedLayerSizes::{AllItems, AllSubtrees}, + EstimatedSumTrees::{NoSumTrees, SomeSumTrees}, + }, + tree_type::TreeType, }; use grovedb_version::version::GroveVersion; @@ -332,7 +331,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: ApproximateElements(0), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, None), }, @@ -401,7 +400,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, true), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, 
Some(3)), }, @@ -409,7 +408,7 @@ mod tests { paths.insert( KeyInfoPath(vec![KeyInfo::KnownKey(b"key1".to_vec())]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, true), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, None), }, @@ -468,7 +467,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, true), estimated_layer_sizes: AllItems(4, 3, None), }, @@ -541,7 +540,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(1, false), estimated_layer_sizes: AllSubtrees(1, NoSumTrees, None), }, @@ -627,7 +626,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, false), estimated_layer_sizes: AllSubtrees(1, NoSumTrees, None), }, @@ -636,7 +635,7 @@ mod tests { paths.insert( KeyInfoPath(vec![KeyInfo::KnownKey(b"0".to_vec())]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, true), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, None), }, @@ -707,12 +706,15 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(1, false), estimated_layer_sizes: AllSubtrees( 1, SomeSumTrees { sum_trees_weight: 1, + big_sum_trees_weight: 0, + count_trees_weight: 0, + count_sum_trees_weight: 0, non_sum_trees_weight: 1, }, None, @@ -722,7 +724,7 @@ mod tests { paths.insert( KeyInfoPath::from_known_owned_path(vec![vec![7]]), EstimatedLayerInformation { - is_sum_tree: true, + tree_type: TreeType::SumTree, estimated_layer_count: PotentiallyAtMaxElements, 
estimated_layer_sizes: AllItems(32, 8, None), }, @@ -785,7 +787,7 @@ mod tests { paths.insert( KeyInfoPath(vec![]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(1, false), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, None), }, @@ -794,7 +796,7 @@ mod tests { paths.insert( KeyInfoPath(vec![KeyInfo::KnownKey(b"0".to_vec())]), EstimatedLayerInformation { - is_sum_tree: false, + tree_type: TreeType::NormalTree, estimated_layer_count: EstimatedLevel(0, true), estimated_layer_sizes: AllSubtrees(4, NoSumTrees, None), }, diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index 5e4f6e42..b48109ad 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -14,7 +14,7 @@ use grovedb_costs::{ use grovedb_merk::estimated_costs::worst_case_costs::{ worst_case_merk_propagate, WorstCaseLayerInformation, }; -use grovedb_merk::RootHashKeyAndSum; +use grovedb_merk::{tree::AggregateData, tree_type::TreeType, RootHashKeyAndAggregateData}; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::version::GroveVersion; @@ -36,7 +36,7 @@ impl GroveOp { fn worst_case_cost( &self, key: &KeyInfo, - is_in_parent_sum_tree: bool, + in_parent_tree_type: TreeType, worst_case_layer_element_estimates: &WorstCaseLayerInformation, propagate: bool, grove_version: &GroveVersion, @@ -49,29 +49,33 @@ impl GroveOp { } }; match self { - GroveOp::ReplaceTreeRootKey { sum, .. } => GroveDb::worst_case_merk_replace_tree( - key, - sum.is_some(), - is_in_parent_sum_tree, - worst_case_layer_element_estimates, - propagate, - grove_version, - ), - GroveOp::InsertTreeWithRootHash { flags, sum, .. } => { - GroveDb::worst_case_merk_insert_tree( + GroveOp::ReplaceTreeRootKey { aggregate_data, .. 
} => { + GroveDb::worst_case_merk_replace_tree( key, - flags, - sum.is_some(), - is_in_parent_sum_tree, - propagate_if_input(), + aggregate_data.parent_tree_type(), + in_parent_tree_type, + worst_case_layer_element_estimates, + propagate, grove_version, ) } + GroveOp::InsertTreeWithRootHash { + flags, + aggregate_data, + .. + } => GroveDb::worst_case_merk_insert_tree( + key, + flags, + aggregate_data.parent_tree_type(), + in_parent_tree_type, + propagate_if_input(), + grove_version, + ), GroveOp::InsertOrReplace { element } | GroveOp::InsertOnly { element } => { GroveDb::worst_case_merk_insert_element( key, element, - is_in_parent_sum_tree, + in_parent_tree_type, propagate_if_input(), grove_version, ) @@ -88,14 +92,14 @@ impl GroveOp { *max_reference_hop, flags.clone(), ), - is_in_parent_sum_tree, + in_parent_tree_type, propagate_if_input(), grove_version, ), GroveOp::Replace { element } => GroveDb::worst_case_merk_replace_element( key, element, - is_in_parent_sum_tree, + in_parent_tree_type, propagate_if_input(), grove_version, ), @@ -105,7 +109,7 @@ impl GroveOp { } => GroveDb::worst_case_merk_replace_element( key, element, - is_in_parent_sum_tree, + in_parent_tree_type, propagate_if_input(), grove_version, ), @@ -115,16 +119,9 @@ impl GroveOp { propagate, grove_version, ), - GroveOp::DeleteTree => GroveDb::worst_case_merk_delete_tree( - key, - false, - worst_case_layer_element_estimates, - propagate, - grove_version, - ), - GroveOp::DeleteSumTree => GroveDb::worst_case_merk_delete_tree( + GroveOp::DeleteTree(tree_type) => GroveDb::worst_case_merk_delete_tree( key, - true, + *tree_type, worst_case_layer_element_estimates, propagate, grove_version, @@ -163,7 +160,7 @@ impl fmt::Debug for WorstCaseTreeCacheKnownPaths { #[cfg(feature = "minimal")] impl TreeCache for WorstCaseTreeCacheKnownPaths { - fn insert(&mut self, op: &QualifiedGroveDbOp, _is_sum_tree: bool) -> CostResult<(), Error> { + fn insert(&mut self, op: &QualifiedGroveDbOp, _tree_type: TreeType) -> 
CostResult<(), Error> { let mut worst_case_cost = OperationCost::default(); let mut inserted_path = op.path.clone(); inserted_path.push(op.key.clone()); @@ -188,7 +185,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { _flags_update: &mut G, _split_removal_bytes: &mut SR, grove_version: &GroveVersion, - ) -> CostResult { + ) -> CostResult { let mut cost = OperationCost::default(); let worst_case_layer_element_estimates = cost_return_on_error_no_add!( @@ -208,7 +205,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, - false, + TreeType::NormalTree, grove_version, ) ); @@ -220,7 +217,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { &mut cost, op.worst_case_cost( &key, - false, + TreeType::NormalTree, worst_case_layer_element_estimates, false, grove_version @@ -232,7 +229,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { &mut cost, worst_case_merk_propagate(worst_case_layer_element_estimates).map_err(Error::MerkError) ); - Ok(([0u8; 32], None, None)).wrap_with_cost(cost) + Ok(([0u8; 32], None, AggregateData::NoAggregateData)).wrap_with_cost(cost) } fn update_base_merk_root_key( @@ -251,7 +248,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { GroveDb::add_worst_case_get_merk_at_path::( &mut cost, &base_path, - false, + TreeType::NormalTree, grove_version, ) ); diff --git a/grovedb/src/batch/just_in_time_reference_update.rs b/grovedb/src/batch/just_in_time_reference_update.rs index f4385b89..06081eb2 100644 --- a/grovedb/src/batch/just_in_time_reference_update.rs +++ b/grovedb/src/batch/just_in_time_reference_update.rs @@ -10,6 +10,7 @@ use grovedb_costs::{ }; use grovedb_merk::{ tree::{kv::KV, value_hash, TreeNode}, + tree_type::TreeType, CryptoHash, Merk, }; use grovedb_storage::StorageContext; @@ -31,7 +32,7 @@ where new_element: &mut Element, old_element: Element, old_serialized_element: &[u8], - is_in_sum_tree: bool, + in_tree_type: TreeType, flags_update: &mut G, 
split_removal_bytes: &mut SR, grove_version: &GroveVersion, @@ -79,7 +80,7 @@ where let old_storage_cost = KV::node_value_byte_cost_size( key.len() as u32, old_serialized_element.len() as u32, - is_in_sum_tree, + in_tree_type.inner_node_type(), ); let original_new_element = new_element.clone(); @@ -99,10 +100,14 @@ where KV::node_value_byte_cost_size( key.len() as u32, serialized_with_old_flags.len() as u32, - is_in_sum_tree, + in_tree_type.inner_node_type(), ) } else { - KV::node_value_byte_cost_size(key.len() as u32, serialized.len() as u32, is_in_sum_tree) + KV::node_value_byte_cost_size( + key.len() as u32, + serialized.len() as u32, + in_tree_type.inner_node_type(), + ) }; let mut i = 0; @@ -153,7 +158,7 @@ where new_storage_cost = KV::node_value_byte_cost_size( key.len() as u32, new_serialized_bytes.len() as u32, - is_in_sum_tree, + in_tree_type.inner_node_type(), ); if serialization_to_use == new_serialized_bytes { diff --git a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 7f73fea0..3d35525c 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -50,10 +50,10 @@ use grovedb_costs::{ use grovedb_merk::{ tree::{ kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, - value_hash, NULL_HASH, + value_hash, AggregateData, NULL_HASH, }, - CryptoHash, Error as MerkError, Merk, MerkType, Op, RootHashKeyAndSum, - TreeFeatureType::{BasicMerkNode, SummedMerkNode}, + tree_type::TreeType, + CryptoHash, Error as MerkError, Merk, MerkType, Op, RootHashKeyAndAggregateData, }; use grovedb_path::SubtreePath; use grovedb_storage::{ @@ -74,7 +74,10 @@ pub use crate::batch::batch_structure::{OpsByLevelPath, OpsByPath}; use crate::batch::estimated_costs::EstimatedCostsType; use crate::{ batch::{batch_structure::BatchStructure, mode::BatchRunMode}, - element::{MaxReferenceHop, SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, + element::{ + MaxReferenceHop, BIG_SUM_TREE_COST_SIZE, COUNT_SUM_TREE_COST_SIZE, 
COUNT_TREE_COST_SIZE, + SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE, + }, operations::{get::MAX_REFERENCE_HOPS, proof::util::hex_to_ascii}, reference_path::{ path_from_reference_path_type, path_from_reference_qualified_path_type, ReferencePathType, @@ -91,8 +94,8 @@ pub enum GroveOp { hash: [u8; 32], /// Root key root_key: Option>, - /// Sum - sum: Option, + /// Aggregate data + aggregate_data: AggregateData, }, /// Inserts an element that is known to not yet exist InsertOnly { @@ -124,8 +127,8 @@ pub enum GroveOp { root_key: Option>, /// Flags flags: Option, - /// Sum - sum: Option, + /// Aggregate Data such as sum + aggregate_data: AggregateData, }, /// Refresh the reference with information provided /// Providing this information is necessary to be able to calculate @@ -142,16 +145,14 @@ pub enum GroveOp { /// Delete Delete, /// Delete tree - DeleteTree, - /// Delete sum tree - DeleteSumTree, + DeleteTree(TreeType), } impl GroveOp { fn to_u8(&self) -> u8 { match self { - GroveOp::DeleteTree => 0, - GroveOp::DeleteSumTree => 1, + GroveOp::DeleteTree(_) => 0, + // 1 used to be used for the DeleteSumTree GroveOp::Delete => 2, GroveOp::InsertTreeWithRootHash { .. } => 3, GroveOp::ReplaceTreeRootKey { .. } => 4, @@ -378,8 +379,7 @@ impl fmt::Debug for QualifiedGroveDbOp { ) } GroveOp::Delete => "Delete".to_string(), - GroveOp::DeleteTree => "Delete Tree".to_string(), - GroveOp::DeleteSumTree => "Delete Sum Tree".to_string(), + GroveOp::DeleteTree(tree_type) => format!("Delete Tree {}", tree_type), GroveOp::ReplaceTreeRootKey { .. } => "Replace Tree Hash and Root Key".to_string(), GroveOp::InsertTreeWithRootHash { .. 
} => "Insert Tree Hash and Root Key".to_string(), }; @@ -509,16 +509,12 @@ impl QualifiedGroveDbOp { } /// A delete tree op using a known owned path and known key - pub fn delete_tree_op(path: Vec>, key: Vec, is_sum_tree: bool) -> Self { + pub fn delete_tree_op(path: Vec>, key: Vec, tree_type: TreeType) -> Self { let path = KeyInfoPath::from_known_owned_path(path); Self { path, key: KnownKey(key), - op: if is_sum_tree { - GroveOp::DeleteSumTree - } else { - GroveOp::DeleteTree - }, + op: GroveOp::DeleteTree(tree_type), } } @@ -532,15 +528,11 @@ impl QualifiedGroveDbOp { } /// A delete tree op - pub fn delete_estimated_tree_op(path: KeyInfoPath, key: KeyInfo, is_sum_tree: bool) -> Self { + pub fn delete_estimated_tree_op(path: KeyInfoPath, key: KeyInfo, tree_type: TreeType) -> Self { Self { path, key, - op: if is_sum_tree { - GroveOp::DeleteSumTree - } else { - GroveOp::DeleteTree - }, + op: GroveOp::DeleteTree(tree_type), } } @@ -681,7 +673,7 @@ impl fmt::Debug for TreeCacheMerkByPath { } trait TreeCache { - fn insert(&mut self, op: &QualifiedGroveDbOp, is_sum_tree: bool) -> CostResult<(), Error>; + fn insert(&mut self, op: &QualifiedGroveDbOp, tree_type: TreeType) -> CostResult<(), Error>; fn get_batch_run_mode(&self) -> BatchRunMode; @@ -695,7 +687,7 @@ trait TreeCache { flags_update: &mut G, split_removal_bytes: &mut SR, grove_version: &GroveVersion, - ) -> CostResult; + ) -> CostResult; fn update_base_merk_root_key( &mut self, @@ -863,10 +855,10 @@ where /// /// # Returns /// - /// * `Ok((Element, Vec, bool))` - Returns the deserialized `Element` - /// and the serialized counterpart if the retrieval and deserialization - /// are successful, wrapped in the associated cost. Also returns if the - /// merk of the element is a sum tree as a bool. + /// * `Ok((Element, Vec, TreeType))` - Returns the deserialized + /// `Element` and the serialized counterpart if the retrieval and + /// deserialization are successful, wrapped in the associated cost. 
Also + /// returns if the merk of the element is a sum tree as a TreeType. /// * `Err(Error)` - Returns an error if any issue occurs during the /// retrieval or deserialization of the referenced element. /// @@ -883,7 +875,7 @@ where key: &[u8], reference_path: &[Vec], grove_version: &GroveVersion, - ) -> CostResult, bool)>, Error> { + ) -> CostResult, TreeType)>, Error> { let mut cost = OperationCost::default(); let merk = match self.merks.entry(reference_path.to_vec()) { @@ -905,7 +897,7 @@ where .map_err(|e| Error::CorruptedData(e.to_string())) ); - let is_sum_tree = merk.is_sum_tree; + let tree_type = merk.tree_type; if let Some(referenced_element) = referenced_element { let element = cost_return_on_error_no_add!( @@ -915,7 +907,7 @@ where }) ); - Ok(Some((element, referenced_element, is_sum_tree))).wrap_with_cost(cost) + Ok(Some((element, referenced_element, tree_type))).wrap_with_cost(cost) } else { Ok(None).wrap_with_cost(cost) } @@ -1027,7 +1019,11 @@ where grove_version, ) } - Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidBatchOperation( + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => Err(Error::InvalidBatchOperation( "references can not point to trees being updated", )) .wrap_with_cost(cost), @@ -1145,12 +1141,14 @@ where grove_version, ) } - Element::Tree(..) | Element::SumTree(..) => { - Err(Error::InvalidBatchOperation( - "references can not point to trees being updated", - )) - .wrap_with_cost(cost) - } + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => Err(Error::InvalidBatchOperation( + "references can not point to trees being updated", + )) + .wrap_with_cost(cost), } } GroveOp::InsertOnly { element } => match element { @@ -1174,7 +1172,11 @@ where grove_version, ) } - Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidBatchOperation( + Element::Tree(..) 
+ | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => Err(Error::InvalidBatchOperation( "references can not point to trees being updated", )) .wrap_with_cost(cost), @@ -1200,12 +1202,10 @@ where grove_version, ) } - GroveOp::Delete | GroveOp::DeleteTree | GroveOp::DeleteSumTree => { - Err(Error::InvalidBatchOperation( - "references can not point to something currently being deleted", - )) - .wrap_with_cost(cost) - } + GroveOp::Delete | GroveOp::DeleteTree(_) => Err(Error::InvalidBatchOperation( + "references can not point to something currently being deleted", + )) + .wrap_with_cost(cost), } } else { self.process_reference( @@ -1232,7 +1232,7 @@ where F: FnMut(&[Vec], bool) -> CostResult, Error>, S: StorageContext<'db>, { - fn insert(&mut self, op: &QualifiedGroveDbOp, is_sum_tree: bool) -> CostResult<(), Error> { + fn insert(&mut self, op: &QualifiedGroveDbOp, tree_type: TreeType) -> CostResult<(), Error> { let mut cost = OperationCost::default(); let mut inserted_path = op.path.to_path(); @@ -1240,7 +1240,7 @@ where if let HashMapEntry::Vacant(e) = self.merks.entry(inserted_path.clone()) { let mut merk = cost_return_on_error!(&mut cost, (self.get_merk_fn)(&inserted_path, true)); - merk.is_sum_tree = is_sum_tree; + merk.tree_type = tree_type; e.insert(merk); } @@ -1277,14 +1277,14 @@ where flags_update: &mut G, split_removal_bytes: &mut SR, grove_version: &GroveVersion, - ) -> CostResult { + ) -> CostResult { let mut cost = OperationCost::default(); // todo: fix this let p = path.to_path(); let path = &p; // This also populates Merk trees cache - let is_sum_tree = { + let in_tree_type = { let merk = match self.merks.entry(path.to_vec()) { HashMapEntry::Occupied(o) => o.into_mut(), HashMapEntry::Vacant(v) => v.insert(cost_return_on_error!( @@ -1292,7 +1292,7 @@ where (self.get_merk_fn)(path, false) )), }; - merk.is_sum_tree + merk.tree_type }; let mut batch_operations: Vec<(Vec, Op)> = vec![]; @@ -1306,7 
+1306,7 @@ where let merk_feature_type = cost_return_on_error!( &mut cost, element - .get_feature_type(is_sum_tree) + .get_feature_type(in_tree_type) .wrap_with_cost(OperationCost::default()) ); let path_reference = cost_return_on_error!( @@ -1348,11 +1348,15 @@ where ) ); } - Element::Tree(..) | Element::SumTree(..) => { + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => { let merk_feature_type = cost_return_on_error!( &mut cost, element - .get_feature_type(is_sum_tree) + .get_feature_type(in_tree_type) .wrap_with_cost(OperationCost::default()) ); cost_return_on_error!( @@ -1371,7 +1375,7 @@ where let merk_feature_type = cost_return_on_error!( &mut cost, element - .get_feature_type(is_sum_tree) + .get_feature_type(in_tree_type) .wrap_with_cost(OperationCost::default()) ); if batch_apply_options.validate_insertion_does_not_override { @@ -1450,11 +1454,7 @@ where .wrap_with_cost(cost); }; - let merk_feature_type = if is_sum_tree { - SummedMerkNode(0) - } else { - BasicMerkNode - }; + let merk_feature_type = in_tree_type.empty_tree_feature_type(); let path_reference = cost_return_on_error!( &mut cost, @@ -1501,32 +1501,20 @@ where Element::delete_into_batch_operations( key_info.get_key(), false, - is_sum_tree, /* we are in a sum tree, this might or might not be a - * sum item */ + in_tree_type, /* we are in a sum tree, this might or might not be a + * sum item */ &mut batch_operations, grove_version ) ); } - GroveOp::DeleteTree => { + GroveOp::DeleteTree(tree_type) => { cost_return_on_error!( &mut cost, Element::delete_into_batch_operations( key_info.get_key(), true, - false, - &mut batch_operations, - grove_version - ) - ); - } - GroveOp::DeleteSumTree => { - cost_return_on_error!( - &mut cost, - Element::delete_into_batch_operations( - key_info.get_key(), - true, - true, + tree_type, &mut batch_operations, grove_version ) @@ -1535,7 +1523,7 @@ where GroveOp::ReplaceTreeRootKey { hash, 
root_key, - sum, + aggregate_data, } => { let merk = self.merks.get(path).expect("the Merk is cached"); cost_return_on_error!( @@ -1545,7 +1533,7 @@ where key_info.get_key(), root_key, hash, - sum, + aggregate_data, &mut batch_operations, grove_version ) @@ -1555,16 +1543,40 @@ where hash, root_key, flags, - sum, + aggregate_data, } => { - let element = match sum { - None => Element::new_tree_with_flags(root_key, flags), - Some(sum_value) => Element::new_sum_tree_with_flags_and_sum_value( - root_key, sum_value, flags, - ), + let element = match aggregate_data { + AggregateData::NoAggregateData => { + Element::new_tree_with_flags(root_key, flags) + } + AggregateData::Sum(sum_value) => { + Element::new_sum_tree_with_flags_and_sum_value( + root_key, sum_value, flags, + ) + } + AggregateData::BigSum(sum_value) => { + Element::new_big_sum_tree_with_flags_and_sum_value( + root_key, sum_value, flags, + ) + } + AggregateData::Count(count_value) => { + Element::new_count_tree_with_flags_and_count_value( + root_key, + count_value, + flags, + ) + } + AggregateData::CountAndSum(count_value, sum_value) => { + Element::new_count_sum_tree_with_flags_and_sum_and_count_value( + root_key, + count_value, + sum_value, + flags, + ) + } }; let merk_feature_type = - cost_return_on_error_no_add!(&cost, element.get_feature_type(is_sum_tree)); + cost_return_on_error_no_add!(&cost, element.get_feature_type(in_tree_type)); cost_return_on_error!( &mut cost, @@ -1590,8 +1602,13 @@ where &[], Some(batch_apply_options.as_merk_options()), &|key, value| { - Element::specialized_costs_for_key_value(key, value, is_sum_tree, grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Element::specialized_costs_for_key_value( + key, + value, + in_tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), &|old_value, new_value| { @@ -1642,11 +1659,18 @@ where // we 
need to give back the value defined cost in the case that the // new element is a tree match new_element { - Element::Tree(..) | Element::SumTree(..) => { - let tree_cost_size = if new_element.is_sum_tree() { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => { + let tree_type = new_element.tree_type().unwrap(); + let tree_cost_size = match tree_type { + TreeType::NormalTree => TREE_COST_SIZE, + TreeType::SumTree => SUM_TREE_COST_SIZE, + TreeType::BigSumTree => BIG_SUM_TREE_COST_SIZE, + TreeType::CountTree => COUNT_TREE_COST_SIZE, + TreeType::CountSumTree => COUNT_SUM_TREE_COST_SIZE, }; let tree_value_cost = tree_cost_size + flags_len @@ -1690,7 +1714,7 @@ where .map_err(|e| Error::CorruptedData(e.to_string())) ); let r = merk - .root_hash_key_and_sum() + .root_hash_key_and_aggregate_data() .add_cost(cost) .map_err(Error::MerkError); @@ -1776,7 +1800,7 @@ impl GroveDb { ); } } else { - let (root_hash, calculated_root_key, sum_value) = cost_return_on_error!( + let (root_hash, calculated_root_key, aggregate_data) = cost_return_on_error!( &mut cost, merk_tree_cache.execute_ops_on_path( &path, @@ -1806,7 +1830,7 @@ impl GroveDb { GroveOp::ReplaceTreeRootKey { hash: root_hash, root_key: calculated_root_key, - sum: sum_value, + aggregate_data, } .into(), ); @@ -1817,11 +1841,11 @@ impl GroveDb { GroveOp::ReplaceTreeRootKey { hash, root_key, - sum, + aggregate_data: aggregate_data_entry, } => { *hash = root_hash; *root_key = calculated_root_key; - *sum = sum_value; + *aggregate_data_entry = aggregate_data; } GroveOp::InsertTreeWithRootHash { .. 
} => { return Err(Error::CorruptedCodeExecution( @@ -1839,7 +1863,8 @@ impl GroveDb { hash: root_hash, root_key: calculated_root_key, flags: flags.clone(), - sum: None, + aggregate_data: + AggregateData::NoAggregateData, } .into(); } else if let Element::SumTree(.., flags) = @@ -1850,7 +1875,40 @@ impl GroveDb { hash: root_hash, root_key: calculated_root_key, flags: flags.clone(), - sum: sum_value, + aggregate_data, + } + .into(); + } else if let Element::BigSumTree(.., flags) = + element + { + *mutable_occupied_entry = + GroveOp::InsertTreeWithRootHash { + hash: root_hash, + root_key: calculated_root_key, + flags: flags.clone(), + aggregate_data, + } + .into(); + } else if let Element::CountTree(.., flags) = + element + { + *mutable_occupied_entry = + GroveOp::InsertTreeWithRootHash { + hash: root_hash, + root_key: calculated_root_key, + flags: flags.clone(), + aggregate_data, + } + .into(); + } else if let Element::CountSumTree(.., flags) = + element + { + *mutable_occupied_entry = + GroveOp::InsertTreeWithRootHash { + hash: root_hash, + root_key: calculated_root_key, + flags: flags.clone(), + aggregate_data, } .into(); } else { @@ -1867,9 +1925,7 @@ impl GroveDb { )) .wrap_with_cost(cost); } - GroveOp::Delete - | GroveOp::DeleteTree - | GroveOp::DeleteSumTree => { + GroveOp::Delete | GroveOp::DeleteTree(_) => { if calculated_root_key.is_some() { return Err(Error::InvalidBatchOperation( "modification of tree when it will be \ @@ -1889,7 +1945,7 @@ impl GroveDb { GroveOp::ReplaceTreeRootKey { hash: root_hash, root_key: calculated_root_key, - sum: sum_value, + aggregate_data, }, ); ops_at_level_above.insert(parent_path, ops_on_path); @@ -1901,7 +1957,7 @@ impl GroveDb { GroveOp::ReplaceTreeRootKey { hash: root_hash, root_key: calculated_root_key, - sum: sum_value, + aggregate_data, } .into(), ); @@ -2160,7 +2216,12 @@ impl GroveDb { if let Some((parent_path, parent_key)) = path.derive_parent() { if new_merk { // TODO: can this be a sum tree - 
Ok(Merk::open_empty(storage, MerkType::LayeredMerk, false)).wrap_with_cost(cost) + Ok(Merk::open_empty( + storage, + MerkType::LayeredMerk, + TreeType::NormalTree, + )) + .wrap_with_cost(cost) } else { let parent_storage = self .db @@ -2183,12 +2244,11 @@ impl GroveDb { } ) ); - let is_sum_tree = element.is_sum_tree(); - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -2204,11 +2264,16 @@ impl GroveDb { } } } else if new_merk { - Ok(Merk::open_empty(storage, MerkType::BaseMerk, false)).wrap_with_cost(cost) + Ok(Merk::open_empty( + storage, + MerkType::BaseMerk, + TreeType::NormalTree, + )) + .wrap_with_cost(cost) } else { Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -2244,7 +2309,8 @@ impl GroveDb { } else { MerkType::LayeredMerk }; - Ok(Merk::open_empty(storage, merk_type, false)).wrap_with_cost(local_cost) + Ok(Merk::open_empty(storage, merk_type, TreeType::NormalTree)) + .wrap_with_cost(local_cost) } else if let Some((base_path, last)) = path.derive_parent() { let parent_storage = self .db @@ -2254,12 +2320,11 @@ impl GroveDb { &mut local_cost, Element::get_from_storage(&parent_storage, last, grove_version) ); - let is_sum_tree = element.is_sum_tree(); - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -2276,7 +2341,7 @@ impl GroveDb { } else { Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) diff --git a/grovedb/src/batch/single_deletion_cost_tests.rs b/grovedb/src/batch/single_deletion_cost_tests.rs index c84a2e47..6e784e57 100644 --- a/grovedb/src/batch/single_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_deletion_cost_tests.rs @@ -7,6 +7,7 @@ mod tests { Identifier, StorageRemovalPerEpochByIdentifier, StorageRemovedBytes::SectionedStorageRemoval, }; + use grovedb_merk::tree_type::TreeType; use grovedb_version::version::GroveVersion; use intmap::IntMap; @@ -75,7 +76,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, Some(&tx), grove_version) @@ -219,7 +220,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, None, grove_version) @@ -368,7 +369,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, Some(&tx), grove_version) @@ -467,7 +468,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch_with_element_flags_update( @@ -642,7 +643,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, None, grove_version) diff --git 
a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs index 7db03b7f..4763af16 100644 --- a/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs +++ b/grovedb/src/batch/single_sum_item_deletion_cost_tests.rs @@ -2,6 +2,7 @@ #[cfg(feature = "minimal")] mod tests { + use grovedb_merk::tree_type::TreeType; use grovedb_version::version::GroveVersion; use crate::{ @@ -46,7 +47,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, Some(&tx), grove_version) @@ -153,7 +154,7 @@ mod tests { let ops = vec![QualifiedGroveDbOp::delete_tree_op( vec![], b"key1".to_vec(), - false, + TreeType::NormalTree, )]; let batch_cost = db .apply_batch(ops, None, Some(&tx), grove_version) diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index 1920ff81..696a3a6a 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -397,22 +397,28 @@ fn merk_proof_node_to_grovedbg(node: Node) -> Result { - let element = crate::Element::deserialize(&value, GroveVersion::latest())?; - MerkProofNode::KVValueHashFeatureType( - key, - element_to_grovedbg(element), - hash, - grovedbg_types::TreeFeatureType::BasicMerkNode, - ) - } - Node::KVValueHashFeatureType(key, value, hash, TreeFeatureType::SummedMerkNode(sum)) => { + Node::KVValueHashFeatureType(key, value, hash, feature_type) => { let element = crate::Element::deserialize(&value, GroveVersion::latest())?; + let node_feature_type = match feature_type { + TreeFeatureType::BasicMerkNode => grovedbg_types::TreeFeatureType::BasicMerkNode, + TreeFeatureType::SummedMerkNode(sum) => { + grovedbg_types::TreeFeatureType::SummedMerkNode(sum) + } + TreeFeatureType::BigSummedMerkNode(sum) => { + grovedbg_types::TreeFeatureType::BigSummedMerkNode(sum) + } + TreeFeatureType::CountedMerkNode(count) => { + grovedbg_types::TreeFeatureType::CountedMerkNode(count) 
+ } + TreeFeatureType::CountedSummedMerkNode(count, sum) => { + grovedbg_types::TreeFeatureType::CountedSummedMerkNode(count, sum) + } + }; MerkProofNode::KVValueHashFeatureType( key, element_to_grovedbg(element), hash, - grovedbg_types::TreeFeatureType::SummedMerkNode(sum), + node_feature_type, ) } Node::KVRefValueHash(key, value, hash) => { @@ -597,6 +603,28 @@ fn element_to_grovedbg(element: crate::Element) -> grovedbg_types::Element { sum, element_flags, }, + crate::Element::BigSumTree(root_key, sum, element_flags) => { + grovedbg_types::Element::BigSumTree { + root_key, + sum, + element_flags, + } + } + crate::Element::CountTree(root_key, count, element_flags) => { + grovedbg_types::Element::CountTree { + root_key, + count, + element_flags, + } + } + crate::Element::CountSumTree(root_key, count, sum, element_flags) => { + grovedbg_types::Element::CountSumTree { + root_key, + count, + sum, + element_flags, + } + } } } @@ -628,8 +656,17 @@ fn node_to_update( right_merk_hash, feature_type: match feature_type { TreeFeatureType::BasicMerkNode => grovedbg_types::TreeFeatureType::BasicMerkNode, - TreeFeatureType::SummedMerkNode(x) => { - grovedbg_types::TreeFeatureType::SummedMerkNode(x) + TreeFeatureType::SummedMerkNode(sum) => { + grovedbg_types::TreeFeatureType::SummedMerkNode(sum) + } + TreeFeatureType::BigSummedMerkNode(sum) => { + grovedbg_types::TreeFeatureType::BigSummedMerkNode(sum) + } + TreeFeatureType::CountedMerkNode(count) => { + grovedbg_types::TreeFeatureType::CountedMerkNode(count) + } + TreeFeatureType::CountedSummedMerkNode(count, sum) => { + grovedbg_types::TreeFeatureType::CountedSummedMerkNode(count, sum) } }, value_hash, diff --git a/grovedb/src/element/constructor.rs b/grovedb/src/element/constructor.rs index 1d5049cd..a6bf00bb 100644 --- a/grovedb/src/element/constructor.rs +++ b/grovedb/src/element/constructor.rs @@ -1,6 +1,8 @@ //! Constructor //! 
Functions for setting an element's type +#[cfg(feature = "minimal")] +use crate::element::{BigSumValue, CountValue}; #[cfg(feature = "minimal")] use crate::{ element::{MaxReferenceHop, SumValue}, @@ -28,12 +30,48 @@ impl Element { Element::new_sum_tree(Default::default()) } + #[cfg(feature = "minimal")] + /// Set element to default empty big sum tree without flags + pub fn empty_big_sum_tree() -> Self { + Element::new_big_sum_tree(Default::default()) + } + + #[cfg(feature = "minimal")] + /// Set element to default empty count tree without flags + pub fn empty_count_tree() -> Self { + Element::new_count_tree(Default::default()) + } + + #[cfg(feature = "minimal")] + /// Set element to default empty count sum tree without flags + pub fn empty_count_sum_tree() -> Self { + Element::new_count_sum_tree(Default::default()) + } + #[cfg(feature = "minimal")] /// Set element to default empty sum tree with flags pub fn empty_sum_tree_with_flags(flags: Option) -> Self { Element::new_sum_tree_with_flags(Default::default(), flags) } + #[cfg(feature = "minimal")] + /// Set element to default empty sum tree with flags + pub fn empty_big_sum_tree_with_flags(flags: Option) -> Self { + Element::new_big_sum_tree_with_flags(Default::default(), flags) + } + + #[cfg(feature = "minimal")] + /// Set element to default empty count tree with flags + pub fn empty_count_tree_with_flags(flags: Option) -> Self { + Element::new_count_tree_with_flags(Default::default(), flags) + } + + #[cfg(feature = "minimal")] + /// Set element to default empty count sum tree with flags + pub fn empty_count_sum_tree_with_flags(flags: Option) -> Self { + Element::new_count_sum_tree_with_flags(Default::default(), flags) + } + #[cfg(feature = "minimal")] /// Set element to an item without flags pub fn new_item(item_value: Vec) -> Self { @@ -131,4 +169,80 @@ impl Element { ) -> Self { Element::SumTree(maybe_root_key, sum_value, flags) } + + #[cfg(feature = "minimal")] + /// Set element to a big sum tree without flags 
+ pub fn new_big_sum_tree(maybe_root_key: Option>) -> Self { + Element::BigSumTree(maybe_root_key, 0, None) + } + + #[cfg(feature = "minimal")] + /// Set element to a big sum tree with flags + pub fn new_big_sum_tree_with_flags( + maybe_root_key: Option>, + flags: Option, + ) -> Self { + Element::BigSumTree(maybe_root_key, 0, flags) + } + + #[cfg(feature = "minimal")] + /// Set element to a big sum tree with flags and sum value + pub fn new_big_sum_tree_with_flags_and_sum_value( + maybe_root_key: Option>, + big_sum_value: BigSumValue, + flags: Option, + ) -> Self { + Element::BigSumTree(maybe_root_key, big_sum_value, flags) + } + + #[cfg(feature = "minimal")] + /// Set element to a count tree without flags + pub fn new_count_tree(maybe_root_key: Option>) -> Self { + Element::CountTree(maybe_root_key, 0, None) + } + + #[cfg(feature = "minimal")] + /// Set element to a count tree with flags + pub fn new_count_tree_with_flags( + maybe_root_key: Option>, + flags: Option, + ) -> Self { + Element::CountTree(maybe_root_key, 0, flags) + } + + #[cfg(feature = "minimal")] + /// Set element to a count tree with flags and sum value + pub fn new_count_tree_with_flags_and_count_value( + maybe_root_key: Option>, + count_value: CountValue, + flags: Option, + ) -> Self { + Element::CountTree(maybe_root_key, count_value, flags) + } + + #[cfg(feature = "minimal")] + /// Set element to a count sum tree without flags + pub fn new_count_sum_tree(maybe_root_key: Option>) -> Self { + Element::CountSumTree(maybe_root_key, 0, 0, None) + } + + #[cfg(feature = "minimal")] + /// Set element to a count sum tree with flags + pub fn new_count_sum_tree_with_flags( + maybe_root_key: Option>, + flags: Option, + ) -> Self { + Element::CountSumTree(maybe_root_key, 0, 0, flags) + } + + #[cfg(feature = "minimal")] + /// Set element to a count sum tree with flags and sum value + pub fn new_count_sum_tree_with_flags_and_sum_and_count_value( + maybe_root_key: Option>, + count_value: CountValue, + 
sum_value: SumValue, + flags: Option, + ) -> Self { + Element::CountSumTree(maybe_root_key, count_value, sum_value, flags) + } } diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 8c9b3511..17095d72 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -6,6 +6,8 @@ use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; #[cfg(feature = "minimal")] +use grovedb_merk::tree_type::TreeType; +#[cfg(feature = "minimal")] use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op}; #[cfg(feature = "minimal")] use grovedb_storage::StorageContext; @@ -27,25 +29,37 @@ impl Element { key: K, merk_options: Option, is_layered: bool, - is_sum: bool, + in_tree_type: TreeType, grove_version: &GroveVersion, ) -> CostResult<(), Error> { check_grovedb_v0_with_cost!("delete", grove_version.grovedb_versions.element.delete); - let op = match (is_sum, is_layered) { - (true, true) => Op::DeleteLayeredMaybeSpecialized, - (true, false) => Op::DeleteMaybeSpecialized, - (false, true) => Op::DeleteLayered, - (false, false) => Op::Delete, + let op = match (in_tree_type, is_layered) { + (TreeType::NormalTree, true) => Op::DeleteLayered, + (TreeType::NormalTree, false) => Op::Delete, + (TreeType::SumTree, true) + | (TreeType::BigSumTree, true) + | (TreeType::CountTree, true) + | (TreeType::CountSumTree, true) => Op::DeleteLayeredMaybeSpecialized, + (TreeType::SumTree, false) + | (TreeType::BigSumTree, false) + | (TreeType::CountTree, false) + | (TreeType::CountSumTree, false) => Op::DeleteMaybeSpecialized, }; let batch = [(key, op)]; - let uses_sum_nodes = merk.is_sum_tree; + // todo not sure we get it again, we need to see if this is necessary + let tree_type = merk.tree_type; merk.apply_with_specialized_costs::<_, Vec>( &batch, &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, 
grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Self::specialized_costs_for_key_value( + key, + value, + tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), grove_version, @@ -60,7 +74,7 @@ impl Element { key: K, merk_options: Option, is_layered: bool, - is_in_sum_tree: bool, + in_tree_type: TreeType, sectioned_removal: &mut impl FnMut( &Vec, u32, @@ -78,21 +92,33 @@ impl Element { .element .delete_with_sectioned_removal_bytes ); - let op = match (is_in_sum_tree, is_layered) { - (true, true) => Op::DeleteLayeredMaybeSpecialized, - (true, false) => Op::DeleteMaybeSpecialized, - (false, true) => Op::DeleteLayered, - (false, false) => Op::Delete, + let op = match (in_tree_type, is_layered) { + (TreeType::NormalTree, true) => Op::DeleteLayered, + (TreeType::NormalTree, false) => Op::Delete, + (TreeType::SumTree, true) + | (TreeType::BigSumTree, true) + | (TreeType::CountTree, true) + | (TreeType::CountSumTree, true) => Op::DeleteLayeredMaybeSpecialized, + (TreeType::SumTree, false) + | (TreeType::BigSumTree, false) + | (TreeType::CountTree, false) + | (TreeType::CountSumTree, false) => Op::DeleteMaybeSpecialized, }; let batch = [(key, op)]; - let uses_sum_nodes = merk.is_sum_tree; + // todo not sure we get it again, we need to see if this is necessary + let tree_type = merk.tree_type; merk.apply_with_costs_just_in_time_value_update::<_, Vec>( &batch, &[], merk_options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Self::specialized_costs_for_key_value( + key, + value, + tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), &|_, _| Ok(None), @@ -108,7 +134,7 @@ impl Element { pub fn 
delete_into_batch_operations>( key: K, is_layered: bool, - is_sum: bool, + in_tree_type: TreeType, batch_operations: &mut Vec>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -119,11 +145,17 @@ impl Element { .element .delete_into_batch_operations ); - let op = match (is_sum, is_layered) { - (true, true) => Op::DeleteLayeredMaybeSpecialized, - (true, false) => Op::DeleteMaybeSpecialized, - (false, true) => Op::DeleteLayered, - (false, false) => Op::Delete, + let op = match (in_tree_type, is_layered) { + (TreeType::NormalTree, true) => Op::DeleteLayered, + (TreeType::NormalTree, false) => Op::Delete, + (TreeType::SumTree, true) + | (TreeType::BigSumTree, true) + | (TreeType::CountTree, true) + | (TreeType::CountSumTree, true) => Op::DeleteLayeredMaybeSpecialized, + (TreeType::SumTree, false) + | (TreeType::BigSumTree, false) + | (TreeType::CountTree, false) + | (TreeType::CountSumTree, false) => Op::DeleteMaybeSpecialized, }; let entry = (key, op); batch_operations.push(entry); diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 3c66b049..40868c77 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -5,19 +5,24 @@ use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -use grovedb_merk::tree::kv::KV; #[cfg(feature = "minimal")] use grovedb_merk::Merk; #[cfg(feature = "minimal")] use grovedb_merk::{ed::Decode, tree::TreeNodeInner}; #[cfg(feature = "minimal")] +use grovedb_merk::{merk::NodeType, tree::kv::KV}; +#[cfg(feature = "minimal")] use grovedb_storage::StorageContext; use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; use integer_encoding::VarInt; -use crate::element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}; +#[cfg(feature = "minimal")] +use crate::{ + element::{CostSize, SUM_ITEM_COST_SIZE}, + operations::proof::util::path_as_slices_hex_to_ascii, +}; #[cfg(feature = 
"minimal")] use crate::{Element, Error, Hash}; @@ -35,13 +40,19 @@ impl Element { Self::get_optional(merk, key.as_ref(), allow_cache, grove_version).map(|result| { let value = result?; value.ok_or_else(|| { + let key_single_byte = if key.as_ref().len() == 1 { + format!("({} in decimal) ", key.as_ref().get(0).unwrap()) + } else { + String::new() + }; Error::PathKeyNotFound(format!( - "get: key \"{}\" not found in Merk that has a root key [{}] and is of type {}", + "get: key 0x{} {}not found in Merk that has a root key [{}] and is of type {}", hex::encode(key), + key_single_byte, merk.root_key() .map(hex::encode) .unwrap_or("None".to_string()), - merk.merk_type + merk.merk_type, )) }) }) @@ -118,13 +129,32 @@ impl Element { key: K, grove_version: &GroveVersion, ) -> CostResult, Error> { - check_grovedb_v0_with_cost!( - "get_optional_from_storage", - grove_version - .grovedb_versions - .element - .get_optional_from_storage - ); + match grove_version + .grovedb_versions + .element + .get_optional_from_storage + { + 0 => Self::get_optional_from_storage_v0(storage, key, grove_version), + 1 => Self::get_optional_from_storage_v1(storage, key, grove_version), + version => Err(Error::VersionError( + GroveVersionError::UnknownVersionMismatch { + method: "get_optional_from_storage".to_string(), + known_versions: vec![0, 1], + received: version, + }, + )) + .wrap_with_cost(OperationCost::default()), + } + } + + #[cfg(feature = "minimal")] + /// Get an element directly from storage under a key + /// Merk does not need to be loaded + fn get_optional_from_storage_v0<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( + storage: &S, + key: K, + grove_version: &GroveVersion, + ) -> CostResult, Error> { let mut cost = OperationCost::default(); let key_ref = key.as_ref(); let node_value_opt = cost_return_on_error!( @@ -162,7 +192,7 @@ impl Element { cost.storage_loaded_bytes = KV::value_byte_cost_size_for_key_and_value_lengths( key_ref.len() as u32, value.as_ref().unwrap().len() as u32, - 
false, + NodeType::NormalNode, ) as u64 } Some(Element::SumItem(_, flags)) => { @@ -172,15 +202,18 @@ impl Element { flags_len + flags_len.required_space() as u32 }); let value_len = cost_size + flags_len; - cost.storage_loaded_bytes = - KV::node_value_byte_cost_size(key_ref.len() as u32, value_len, false) as u64 + cost.storage_loaded_bytes = KV::node_value_byte_cost_size( + key_ref.len() as u32, + value_len, + NodeType::NormalNode, + ) as u64 } - Some(Element::Tree(_, flags)) | Some(Element::SumTree(_, _, flags)) => { - let tree_cost_size = if element.as_ref().unwrap().is_sum_tree() { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + Some(Element::Tree(_, flags)) + | Some(Element::SumTree(_, _, flags)) + | Some(Element::BigSumTree(_, _, flags)) + | Some(Element::CountTree(_, _, flags)) + | Some(Element::CountSumTree(.., flags)) => { + let tree_cost_size = element.as_ref().unwrap().tree_type().unwrap().cost_size(); let flags_len = flags.as_ref().map_or(0, |flags| { let flags_len = flags.len() as u32; flags_len + flags_len.required_space() as u32 @@ -190,7 +223,7 @@ impl Element { KV::layered_value_byte_cost_size_for_key_and_value_lengths( key_ref.len() as u32, value_len, - false, + NodeType::NormalNode, ) as u64 } None => {} @@ -198,6 +231,87 @@ impl Element { Ok(element).wrap_with_cost(cost) } + #[cfg(feature = "minimal")] + /// Get an element directly from storage under a key + /// Merk does not need to be loaded + fn get_optional_from_storage_v1<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( + storage: &S, + key: K, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + let mut cost = OperationCost::default(); + let key_ref = key.as_ref(); + let node_value_opt = cost_return_on_error!( + &mut cost, + storage + .get(key_ref) + .map_err(|e| Error::CorruptedData(e.to_string())) + ); + let maybe_tree_inner: Option = cost_return_on_error_no_add!( + &cost, + node_value_opt + .map(|node_value| { + Decode::decode(node_value.as_slice()) + .map_err(|e| 
Error::CorruptedData(e.to_string())) + }) + .transpose() + ); + + let Some((value, tree_feature_type)) = + maybe_tree_inner.map(|tree_inner| tree_inner.value_as_owned_with_feature()) + else { + return Ok(None).wrap_with_cost(cost); + }; + let node_type = tree_feature_type.node_type(); + let element = cost_return_on_error_no_add!( + &cost, + Self::deserialize(value.as_slice(), grove_version).map_err(|_| { + Error::CorruptedData(String::from("unable to deserialize element")) + }) + ); + match &element { + Element::Item(..) | Element::Reference(..) => { + // while the loaded item might be a sum item, it is given for free + // as it would be very hard to know in advance + cost.storage_loaded_bytes = KV::value_byte_cost_size_for_key_and_value_lengths( + key_ref.len() as u32, + value.len() as u32, + node_type, + ) as u64 + } + Element::SumItem(_, flags) => { + let cost_size = SUM_ITEM_COST_SIZE; + let flags_len = flags.as_ref().map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let value_len = cost_size + flags_len; + cost.storage_loaded_bytes = + KV::node_value_byte_cost_size(key_ref.len() as u32, value_len, node_type) as u64 + // this is changed to sum node in v1 + } + Element::Tree(_, flags) + | Element::SumTree(_, _, flags) + | Element::BigSumTree(_, _, flags) + | Element::CountTree(_, _, flags) + | Element::CountSumTree(.., flags) => { + let tree_cost_size = element.tree_type().unwrap().cost_size(); + let flags_len = flags.as_ref().map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let value_len = tree_cost_size + flags_len; + cost.storage_loaded_bytes = + KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key_ref.len() as u32, + value_len, + node_type, + ) as u64 + } + } + Ok(Some(element)).wrap_with_cost(cost) + } + #[cfg(feature = "minimal")] /// Get an element from Merk under a key; path should be resolved and proper /// Merk 
should be loaded by this moment @@ -208,6 +322,8 @@ impl Element { allow_cache: bool, grove_version: &GroveVersion, ) -> CostResult { + use crate::error::GroveDbErrorExt; + check_grovedb_v0_with_cost!( "get_with_absolute_refs", grove_version @@ -220,6 +336,7 @@ impl Element { let element = cost_return_on_error!( &mut cost, Self::get(merk, key.as_ref(), allow_cache, grove_version) + .add_context(format!("path is {}", path_as_slices_hex_to_ascii(path))) ); let absolute_element = cost_return_on_error_no_add!( @@ -262,6 +379,7 @@ impl Element { #[cfg(feature = "minimal")] #[cfg(test)] mod tests { + use grovedb_merk::tree_type::TreeType; use grovedb_path::SubtreePath; use grovedb_storage::{rocksdb_storage::test_utils::TempStorage, Storage, StorageBatch}; @@ -277,7 +395,7 @@ mod tests { .unwrap(); let mut merk = Merk::open_base( ctx, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -302,7 +420,7 @@ mod tests { .unwrap(); let mut merk = Merk::open_base( ctx, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index af74c6f8..d5cf0321 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -1,33 +1,40 @@ //! Helpers //! 
Implements helper functions in Element -#[cfg(feature = "minimal")] -use grovedb_merk::tree::kv::{ - ValueDefinedCostType, - ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, -}; +#[cfg(any(feature = "minimal", feature = "verify"))] +use grovedb_merk::tree_type::{MaybeTree, TreeType}; #[cfg(feature = "minimal")] use grovedb_merk::{ - tree::{kv::KV, TreeNode}, + merk::NodeType, + tree::{ + kv::{ + ValueDefinedCostType, + ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, + KV, + }, + TreeNode, + }, TreeFeatureType, - TreeFeatureType::{BasicMerkNode, SummedMerkNode}, + TreeFeatureType::{ + BasicMerkNode, BigSummedMerkNode, CountedMerkNode, CountedSummedMerkNode, SummedMerkNode, + }, }; #[cfg(feature = "minimal")] use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; #[cfg(feature = "minimal")] use integer_encoding::VarInt; +#[cfg(feature = "minimal")] +use crate::element::{ + BIG_SUM_TREE_COST_SIZE, COUNT_SUM_TREE_COST_SIZE, COUNT_TREE_COST_SIZE, SUM_ITEM_COST_SIZE, + SUM_TREE_COST_SIZE, TREE_COST_SIZE, +}; #[cfg(feature = "minimal")] use crate::reference_path::path_from_reference_path_type; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::reference_path::ReferencePathType; -#[cfg(feature = "minimal")] -use crate::{ - element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, - ElementFlags, -}; #[cfg(any(feature = "minimal", feature = "verify"))] -use crate::{Element, Error}; +use crate::{Element, ElementFlags, Error}; impl Element { #[cfg(any(feature = "minimal", feature = "verify"))] @@ -40,6 +47,41 @@ impl Element { } } + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Decoded the integer value in the CountTree element type, returns 1 for + /// everything else + pub fn count_value_or_default(&self) -> u64 { + match self { + Element::CountTree(_, count_value, _) => *count_value, + _ => 1, + } + } + + #[cfg(any(feature = "minimal", feature = 
"verify"))] + /// Decoded the integer value in the CountTree element type, returns 1 for + /// everything else + pub fn count_sum_value_or_default(&self) -> (u64, i64) { + match self { + Element::SumItem(sum_value, _) | Element::SumTree(_, sum_value, _) => (1, *sum_value), + Element::CountTree(_, count_value, _) => (*count_value, 0), + Element::CountSumTree(_, count_value, sum_value, _) => (*count_value, *sum_value), + _ => (1, 0), + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Decoded the integer value in the SumItem element type, returns 0 for + /// everything else + pub fn big_sum_value_or_default(&self) -> i128 { + match self { + Element::SumItem(sum_value, _) | Element::SumTree(_, sum_value, _) => { + *sum_value as i128 + } + Element::BigSumTree(_, sum_value, _) => *sum_value, + _ => 0, + } + } + #[cfg(any(feature = "minimal", feature = "verify"))] /// Decoded the integer value in the SumItem element type pub fn as_sum_item_value(&self) -> Result { @@ -109,6 +151,79 @@ impl Element { matches!(self, Element::SumTree(..)) } + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a tree and return the root_tree info and tree + /// type + pub fn root_key_and_tree_type_owned(self) -> Option<(Option>, TreeType)> { + match self { + Element::Tree(root_key, _) => Some((root_key, TreeType::NormalTree)), + Element::SumTree(root_key, ..) => Some((root_key, TreeType::SumTree)), + Element::BigSumTree(root_key, ..) => Some((root_key, TreeType::BigSumTree)), + Element::CountTree(root_key, ..) => Some((root_key, TreeType::CountTree)), + Element::CountSumTree(root_key, ..) 
=> Some((root_key, TreeType::CountSumTree)), + _ => None, + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a tree and return the root_tree info and the + /// tree type + pub fn root_key_and_tree_type(&self) -> Option<(&Option>, TreeType)> { + match self { + Element::Tree(root_key, _) => Some((root_key, TreeType::NormalTree)), + Element::SumTree(root_key, ..) => Some((root_key, TreeType::SumTree)), + Element::BigSumTree(root_key, ..) => Some((root_key, TreeType::BigSumTree)), + Element::CountTree(root_key, ..) => Some((root_key, TreeType::CountTree)), + Element::CountSumTree(root_key, ..) => Some((root_key, TreeType::CountSumTree)), + _ => None, + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a tree and return the flags and the tree type + pub fn tree_flags_and_type(&self) -> Option<(&Option, TreeType)> { + match self { + Element::Tree(_, flags) => Some((flags, TreeType::NormalTree)), + Element::SumTree(_, _, flags) => Some((flags, TreeType::SumTree)), + Element::BigSumTree(_, _, flags) => Some((flags, TreeType::BigSumTree)), + Element::CountTree(_, _, flags) => Some((flags, TreeType::CountTree)), + Element::CountSumTree(.., flags) => Some((flags, TreeType::CountSumTree)), + _ => None, + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a tree and return the tree type + pub fn tree_type(&self) -> Option { + match self { + Element::Tree(..) => Some(TreeType::NormalTree), + Element::SumTree(..) => Some(TreeType::SumTree), + Element::BigSumTree(..) => Some(TreeType::BigSumTree), + Element::CountTree(..) => Some(TreeType::CountTree), + Element::CountSumTree(..) => Some(TreeType::CountSumTree), + _ => None, + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a tree and return the tree type + pub fn maybe_tree_type(&self) -> MaybeTree { + match self { + Element::Tree(..) 
=> MaybeTree::Tree(TreeType::NormalTree), + Element::SumTree(..) => MaybeTree::Tree(TreeType::SumTree), + Element::BigSumTree(..) => MaybeTree::Tree(TreeType::BigSumTree), + Element::CountTree(..) => MaybeTree::Tree(TreeType::CountTree), + Element::CountSumTree(..) => MaybeTree::Tree(TreeType::CountSumTree), + _ => MaybeTree::NotTree, + } + } + + #[cfg(any(feature = "minimal", feature = "verify"))] + /// Check if the element is a big sum tree + pub fn is_big_sum_tree(&self) -> bool { + matches!(self, Element::BigSumTree(..)) + } + #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a tree but not a sum tree pub fn is_basic_tree(&self) -> bool { @@ -118,7 +233,14 @@ impl Element { #[cfg(any(feature = "minimal", feature = "verify"))] /// Check if the element is a tree pub fn is_any_tree(&self) -> bool { - matches!(self, Element::SumTree(..) | Element::Tree(..)) + matches!( + self, + Element::SumTree(..) + | Element::Tree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) 
+ ) } #[cfg(any(feature = "minimal", feature = "verify"))] @@ -147,10 +269,16 @@ impl Element { #[cfg(feature = "minimal")] /// Get the tree feature type - pub fn get_feature_type(&self, parent_is_sum_tree: bool) -> Result { - match parent_is_sum_tree { - true => Ok(SummedMerkNode(self.sum_value_or_default())), - false => Ok(BasicMerkNode), + pub fn get_feature_type(&self, parent_tree_type: TreeType) -> Result { + match parent_tree_type { + TreeType::NormalTree => Ok(BasicMerkNode), + TreeType::SumTree => Ok(SummedMerkNode(self.sum_value_or_default())), + TreeType::BigSumTree => Ok(BigSummedMerkNode(self.big_sum_value_or_default())), + TreeType::CountTree => Ok(CountedMerkNode(self.count_value_or_default())), + TreeType::CountSumTree => { + let v = self.count_sum_value_or_default(); + Ok(CountedSummedMerkNode(v.0, v.1)) + } } } @@ -162,7 +290,10 @@ impl Element { | Element::Item(_, flags) | Element::Reference(_, _, flags) | Element::SumTree(.., flags) - | Element::SumItem(_, flags) => flags, + | Element::BigSumTree(.., flags) + | Element::CountTree(.., flags) + | Element::SumItem(_, flags) + | Element::CountSumTree(.., flags) => flags, } } @@ -174,7 +305,10 @@ impl Element { | Element::Item(_, flags) | Element::Reference(_, _, flags) | Element::SumTree(.., flags) - | Element::SumItem(_, flags) => flags, + | Element::BigSumTree(.., flags) + | Element::CountTree(.., flags) + | Element::SumItem(_, flags) + | Element::CountSumTree(.., flags) => flags, } } @@ -186,7 +320,10 @@ impl Element { | Element::Item(_, flags) | Element::Reference(_, _, flags) | Element::SumTree(.., flags) - | Element::SumItem(_, flags) => flags, + | Element::BigSumTree(.., flags) + | Element::CountTree(.., flags) + | Element::SumItem(_, flags) + | Element::CountSumTree(.., flags) => flags, } } @@ -198,7 +335,10 @@ impl Element { | Element::Item(_, flags) | Element::Reference(_, _, flags) | Element::SumTree(.., flags) - | Element::SumItem(_, flags) => *flags = new_flags, + | 
Element::BigSumTree(.., flags) + | Element::CountTree(.., flags) + | Element::SumItem(_, flags) + | Element::CountSumTree(.., flags) => *flags = new_flags, } } @@ -252,7 +392,7 @@ impl Element { pub fn specialized_costs_for_key_value( key: &Vec, value: &[u8], - is_sum_node: bool, + node_type: NodeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -273,9 +413,7 @@ impl Element { let value_len = TREE_COST_SIZE + flags_len; let key_len = key.len() as u32; KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key_len, - value_len, - is_sum_node, + key_len, value_len, node_type, ) } Element::SumTree(_, _sum_value, flags) => { @@ -286,9 +424,40 @@ impl Element { let value_len = SUM_TREE_COST_SIZE + flags_len; let key_len = key.len() as u32; KV::layered_value_byte_cost_size_for_key_and_value_lengths( - key_len, - value_len, - is_sum_node, + key_len, value_len, node_type, + ) + } + Element::BigSumTree(_, _sum_value, flags) => { + let flags_len = flags.map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let value_len = BIG_SUM_TREE_COST_SIZE + flags_len; + let key_len = key.len() as u32; + KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key_len, value_len, node_type, + ) + } + Element::CountTree(_, _count_value, flags) => { + let flags_len = flags.map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let value_len = COUNT_TREE_COST_SIZE + flags_len; + let key_len = key.len() as u32; + KV::layered_value_byte_cost_size_for_key_and_value_lengths( + key_len, value_len, node_type, + ) + } + Element::CountSumTree(.., flags) => { + let flags_len = flags.map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let value_len = COUNT_SUM_TREE_COST_SIZE + flags_len; + let key_len = key.len() as u32; + KV::layered_value_byte_cost_size_for_key_and_value_lengths( + 
key_len, value_len, node_type, ) } Element::SumItem(.., flags) => { @@ -298,9 +467,9 @@ impl Element { }); let value_len = SUM_ITEM_COST_SIZE + flags_len; let key_len = key.len() as u32; - KV::node_value_byte_cost_size(key_len, value_len, is_sum_node) + KV::node_value_byte_cost_size(key_len, value_len, node_type) } - _ => KV::node_value_byte_cost_size(key.len() as u32, value.len() as u32, is_sum_node), + _ => KV::node_value_byte_cost_size(key.len() as u32, value.len() as u32, node_type), }; Ok(cost) } @@ -315,7 +484,10 @@ impl Element { match self { Element::Tree(..) => Ok(TREE_COST_SIZE), Element::SumTree(..) => Ok(SUM_TREE_COST_SIZE), + Element::BigSumTree(..) => Ok(BIG_SUM_TREE_COST_SIZE), Element::SumItem(..) => Ok(SUM_ITEM_COST_SIZE), + Element::CountTree(..) => Ok(COUNT_TREE_COST_SIZE), + Element::CountSumTree(..) => Ok(COUNT_SUM_TREE_COST_SIZE), _ => Err(Error::CorruptedCodeExecution( "trying to get tree cost from non tree element", )), @@ -337,6 +509,9 @@ impl Element { match self { Element::Tree(..) => Some(LayeredValueDefinedCost(cost)), Element::SumTree(..) => Some(LayeredValueDefinedCost(cost)), + Element::BigSumTree(..) => Some(LayeredValueDefinedCost(cost)), + Element::CountTree(..) => Some(LayeredValueDefinedCost(cost)), + Element::CountSumTree(..) => Some(LayeredValueDefinedCost(cost)), Element::SumItem(..) 
=> Some(SpecializedValueDefinedCost(cost)), _ => None, } diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index 5b47acc4..942a6fd5 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -32,13 +32,13 @@ impl Element { let serialized = cost_return_on_error_default!(self.serialize(grove_version)); - if !merk.is_sum_tree && self.is_sum_item() { + if !merk.tree_type.allows_sum_item() && self.is_sum_item() { return Err(Error::InvalidInput("cannot add sum item to non sum tree")) .wrap_with_cost(Default::default()); } let merk_feature_type = - cost_return_on_error_default!(self.get_feature_type(merk.is_sum_tree)); + cost_return_on_error_default!(self.get_feature_type(merk.tree_type)); let batch_operations = if matches!(self, SumItem(..)) { let value_cost = cost_return_on_error_default!(self.get_specialized_cost(grove_version)); @@ -55,15 +55,20 @@ impl Element { } else { [(key, Op::Put(serialized, merk_feature_type))] }; - let uses_sum_nodes = merk.is_sum_tree; + let tree_type = merk.tree_type; merk.apply_with_specialized_costs::<_, Vec>( &batch_operations, &[], options, &|key, value| { // it is possible that a normal item was being replaced with a - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Self::specialized_costs_for_key_value( + key, + value, + tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), grove_version, @@ -306,7 +311,7 @@ impl Element { let mut cost = OperationCost::default(); let merk_feature_type = cost_return_on_error!( &mut cost, - self.get_feature_type(merk.is_sum_tree) + self.get_feature_type(merk.tree_type) .wrap_with_cost(OperationCost::default()) ); @@ -314,14 +319,19 @@ impl Element { key, Op::PutCombinedReference(serialized, referenced_value, merk_feature_type), )]; - let 
uses_sum_nodes = merk.is_sum_tree; + let tree_type = merk.tree_type; merk.apply_with_specialized_costs::<_, Vec>( &batch_operations, &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Self::specialized_costs_for_key_value( + key, + value, + tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), grove_version, @@ -387,7 +397,7 @@ impl Element { let cost = OperationCost::default(); let merk_feature_type = - cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.is_sum_tree)); + cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.tree_type)); let tree_cost = cost_return_on_error_no_add!(&cost, self.get_specialized_cost(grove_version)); @@ -401,14 +411,19 @@ impl Element { key, Op::PutLayeredReference(serialized, cost, subtree_root_hash, merk_feature_type), )]; - let uses_sum_nodes = merk.is_sum_tree; + let tree_type = merk.tree_type; merk.apply_with_specialized_costs::<_, Vec>( &batch_operations, &[], options, &|key, value| { - Self::specialized_costs_for_key_value(key, value, uses_sum_nodes, grove_version) - .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) + Self::specialized_costs_for_key_value( + key, + value, + tree_type.inner_node_type(), + grove_version, + ) + .map_err(|e| MerkError::ClientCorruptionError(e.to_string())) }, Some(&Element::value_defined_cost_for_serialized_value), grove_version, diff --git a/grovedb/src/element/mod.rs b/grovedb/src/element/mod.rs index c8ecbbc8..06978897 100644 --- a/grovedb/src/element/mod.rs +++ b/grovedb/src/element/mod.rs @@ -24,10 +24,16 @@ mod serialize; use std::fmt; use bincode::{Decode, Encode}; -#[cfg(any(feature = "minimal", feature = "verify"))] +#[cfg(feature = "minimal")] +use grovedb_merk::estimated_costs::SUM_AND_COUNT_LAYER_COST_SIZE; 
+#[cfg(feature = "minimal")] use grovedb_merk::estimated_costs::SUM_VALUE_EXTRA_COST; #[cfg(feature = "minimal")] -use grovedb_merk::estimated_costs::{LAYER_COST_SIZE, SUM_LAYER_COST_SIZE}; +use grovedb_merk::estimated_costs::{ + BIG_SUM_LAYER_COST_SIZE, LAYER_COST_SIZE, SUM_LAYER_COST_SIZE, +}; +#[cfg(feature = "minimal")] +use grovedb_merk::tree_type::TreeType; #[cfg(feature = "minimal")] use grovedb_visualize::visualize_to_vec; @@ -49,7 +55,7 @@ pub type MaxReferenceHop = Option; #[cfg(feature = "minimal")] /// The cost of a tree pub const TREE_COST_SIZE: u32 = LAYER_COST_SIZE; // 3 -#[cfg(any(feature = "minimal", feature = "verify"))] +#[cfg(feature = "minimal")] /// The cost of a sum item /// /// It is 11 because we have 9 bytes for the sum value @@ -60,10 +66,48 @@ pub const SUM_ITEM_COST_SIZE: u32 = SUM_VALUE_EXTRA_COST + 2; // 11 /// The cost of a sum tree pub const SUM_TREE_COST_SIZE: u32 = SUM_LAYER_COST_SIZE; // 12 +#[cfg(feature = "minimal")] +/// The cost of a big sum tree +pub const BIG_SUM_TREE_COST_SIZE: u32 = BIG_SUM_LAYER_COST_SIZE; // 19 + +#[cfg(feature = "minimal")] +/// The cost of a count tree +pub const COUNT_TREE_COST_SIZE: u32 = SUM_LAYER_COST_SIZE; // 12 + +#[cfg(feature = "minimal")] +/// The cost of a count tree +pub const COUNT_SUM_TREE_COST_SIZE: u32 = SUM_AND_COUNT_LAYER_COST_SIZE; // 21 + #[cfg(any(feature = "minimal", feature = "verify"))] /// int 64 sum value pub type SumValue = i64; +#[cfg(any(feature = "minimal", feature = "verify"))] +/// int 128 sum value +pub type BigSumValue = i128; + +#[cfg(any(feature = "minimal", feature = "verify"))] +/// int 64 count value +pub type CountValue = u64; + +#[cfg(feature = "minimal")] +pub trait CostSize { + fn cost_size(&self) -> u32; +} + +#[cfg(feature = "minimal")] +impl CostSize for TreeType { + fn cost_size(&self) -> u32 { + match self { + TreeType::NormalTree => TREE_COST_SIZE, + TreeType::SumTree => SUM_TREE_COST_SIZE, + TreeType::BigSumTree => BIG_SUM_TREE_COST_SIZE, + 
TreeType::CountTree => COUNT_TREE_COST_SIZE, + TreeType::CountSumTree => COUNT_SUM_TREE_COST_SIZE, + } + } +} + #[cfg(any(feature = "minimal", feature = "verify"))] /// Variants of GroveDB stored entities /// @@ -85,6 +129,15 @@ pub enum Element { /// Same as Element::Tree but underlying Merk sums value of it's summable /// nodes SumTree(Option>, SumValue, Option), + /// Same as Element::Tree but underlying Merk sums value of it's summable + /// nodes in big form i128 + /// The big sum tree is valuable if you have a big sum tree of sum trees + BigSumTree(Option>, BigSumValue, Option), + /// Same as Element::Tree but underlying Merk counts value of its countable + /// nodes + CountTree(Option>, CountValue, Option), + /// Combines Element::SumTree and Element::CountTree + CountSumTree(Option>, CountValue, SumValue, Option), } impl fmt::Display for Element { @@ -142,6 +195,40 @@ impl fmt::Display for Element { .map_or(String::new(), |f| format!(", flags: {:?}", f)) ) } + Element::BigSumTree(root_key, sum_value, flags) => { + write!( + f, + "BigSumTree({}, {}{})", + root_key.as_ref().map_or("None".to_string(), hex::encode), + sum_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::CountTree(root_key, count_value, flags) => { + write!( + f, + "CountTree({}, {}{})", + root_key.as_ref().map_or("None".to_string(), hex::encode), + count_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } + Element::CountSumTree(root_key, count_value, sum_value, flags) => { + write!( + f, + "CountSumTree({}, {}, {}{})", + root_key.as_ref().map_or("None".to_string(), hex::encode), + count_value, + sum_value, + flags + .as_ref() + .map_or(String::new(), |f| format!(", flags: {:?}", f)) + ) + } } } } @@ -154,6 +241,9 @@ impl Element { Element::Tree(..) => "tree", Element::SumItem(..) => "sum item", Element::SumTree(..) => "sum tree", + Element::BigSumTree(..) => "big sum tree", + Element::CountTree(..) 
=> "count tree", + Element::CountSumTree(..) => "count sum tree", } } diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index f1975aad..68e57056 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -15,6 +15,8 @@ use grovedb_merk::proofs::query::SubqueryBranch; #[cfg(feature = "minimal")] use grovedb_merk::proofs::Query; #[cfg(feature = "minimal")] +use grovedb_merk::tree_type::TreeType; +#[cfg(feature = "minimal")] use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; @@ -26,6 +28,8 @@ use grovedb_version::{ #[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; #[cfg(any(feature = "minimal", feature = "verify"))] +use crate::operations::proof::util::path_as_slices_hex_to_ascii; +#[cfg(any(feature = "minimal", feature = "verify"))] use crate::Element; #[cfg(feature = "minimal")] use crate::{ @@ -741,6 +745,8 @@ impl Element { add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { + use crate::error::GroveDbErrorExt; + check_grovedb_v0_with_cost!( "query_item", grove_version.grovedb_versions.element.query_item @@ -763,6 +769,7 @@ impl Element { grove_version, { Element::get(&subtree, key, query_options.allow_cache, grove_version) + .add_context(format!("path is {}", path_as_slices_hex_to_ascii(path))) .unwrap_add_cost(&mut cost) } ); diff --git a/grovedb/src/error.rs b/grovedb/src/error.rs index 4618e6ac..2ab1937f 100644 --- a/grovedb/src/error.rs +++ b/grovedb/src/error.rs @@ -2,6 +2,8 @@ use std::convert::Infallible; +use grovedb_costs::CostResult; + /// GroveDB Errors #[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Debug, thiserror::Error)] @@ -158,6 +160,50 @@ pub enum Error { CyclicError(&'static str), } +impl Error { + pub fn add_context(&mut self, append: impl AsRef) { + match self { + 
Self::MissingReference(s) + | Self::InternalError(s) + | Self::InvalidProof(s) + | Self::PathKeyNotFound(s) + | Self::PathNotFound(s) + | Self::PathParentLayerNotFound(s) + | Self::CorruptedReferencePathKeyNotFound(s) + | Self::CorruptedReferencePathNotFound(s) + | Self::CorruptedReferencePathParentLayerNotFound(s) + | Self::InvalidParentLayerPath(s) + | Self::InvalidPath(s) + | Self::CorruptedPath(s) + | Self::CorruptedData(s) + | Self::CorruptedStorage(s) + | Self::DeleteUpTreeStopHeightMoreThanInitialPathSize(s) + | Self::JustInTimeElementFlagsClientError(s) + | Self::SplitRemovalBytesClientError(s) + | Self::ClientReturnedNonClientError(s) + | Self::PathNotFoundInCacheForEstimatedCosts(s) + | Self::NotSupported(s) => { + s.push_str(", "); + s.push_str(append.as_ref()); + } + _ => {} + } + } +} + +pub trait GroveDbErrorExt { + fn add_context(self, append: impl AsRef) -> Self; +} + +impl GroveDbErrorExt for CostResult { + fn add_context(self, append: impl AsRef) -> Self { + self.map_err(|mut e| { + e.add_context(append.as_ref()); + e + }) + } +} + impl From for Error { fn from(_value: Infallible) -> Self { Self::Infallible diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index 74cfe807..e779d9b8 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -13,6 +13,7 @@ use grovedb_merk::{ }, }, tree::TreeNode, + tree_type::TreeType, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; @@ -23,7 +24,7 @@ use integer_encoding::VarInt; use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, - element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, + element::{CostSize, SUM_ITEM_COST_SIZE}, Element, ElementFlags, Error, GroveDb, }; @@ -33,7 +34,7 @@ impl GroveDb { cost: &mut OperationCost, path: &KeyInfoPath, merk_should_be_empty: bool, - is_sum_tree: bool, + in_tree_type: TreeType, grove_version: 
&GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -56,7 +57,7 @@ impl GroveDb { cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, - is_sum_tree, + in_tree_type.inner_node_type(), ) as u64; } } @@ -69,19 +70,51 @@ impl GroveDb { pub(crate) fn average_case_merk_replace_tree( key: &KeyInfo, estimated_layer_information: &EstimatedLayerInformation, - _is_sum_tree: bool, + replacing_tree_type: TreeType, propagate: bool, grove_version: &GroveVersion, ) -> CostResult<(), Error> { - check_grovedb_v0_with_cost!( - "average_case_merk_replace_tree", - grove_version - .grovedb_versions - .operations - .average_case - .average_case_merk_replace_tree - ); + match grove_version + .grovedb_versions + .operations + .average_case + .average_case_merk_replace_tree + { + 0 => Self::average_case_merk_replace_tree_v0( + key, + estimated_layer_information, + replacing_tree_type, + propagate, + grove_version, + ), + 1 => Self::average_case_merk_replace_tree_v1( + key, + estimated_layer_information, + replacing_tree_type, + propagate, + grove_version, + ), + version => Err(Error::VersionError( + GroveVersionError::UnknownVersionMismatch { + method: "average_case_merk_replace_tree".to_string(), + known_versions: vec![0, 1], + received: version, + }, + )) + .wrap_with_cost(OperationCost::default()), + } + } + /// Add average case for insertion into merk + fn average_case_merk_replace_tree_v0( + key: &KeyInfo, + estimated_layer_information: &EstimatedLayerInformation, + _replacing_tree_type: TreeType, + propagate: bool, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { + // In v0 we used the estimated layer information tree type (which is the parent) + // in order to figure out the cost let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( @@ -93,20 +126,52 @@ impl GroveDb { ) .map(|f| f + f.required_space() as u32) 
.unwrap_or_default(); - let tree_cost_size = if estimated_layer_information.is_sum_tree { - SUM_TREE_COST_SIZE + let tree_cost_size = estimated_layer_information.tree_type.cost_size(); // this was wrong + let layer_extra_size = tree_cost_size + flags_size; + add_average_case_merk_replace_layered( + &mut cost, + key_len, + layer_extra_size, + estimated_layer_information.tree_type.inner_node_type(), + ); + if propagate { + add_average_case_merk_propagate(&mut cost, estimated_layer_information, grove_version) + .map_err(Error::MerkError) } else { - TREE_COST_SIZE - }; + Ok(()) + } + .wrap_with_cost(cost) + } + + /// Add average case for insertion into merk + fn average_case_merk_replace_tree_v1( + key: &KeyInfo, + estimated_layer_information: &EstimatedLayerInformation, + replacing_tree_type: TreeType, + propagate: bool, + grove_version: &GroveVersion, + ) -> CostResult<(), Error> { + let mut cost = OperationCost::default(); + let key_len = key.max_length() as u32; + let flags_size = cost_return_on_error_no_add!( + &cost, + estimated_layer_information + .estimated_layer_sizes + .layered_flags_size() + .map_err(Error::MerkError) + ) + .map(|f| f + f.required_space() as u32) + .unwrap_or_default(); + let tree_cost_size = replacing_tree_type.cost_size(); let layer_extra_size = tree_cost_size + flags_size; add_average_case_merk_replace_layered( &mut cost, key_len, layer_extra_size, - estimated_layer_information.is_sum_tree, + estimated_layer_information.tree_type.inner_node_type(), ); if propagate { - add_average_case_merk_propagate(&mut cost, estimated_layer_information) + add_average_case_merk_propagate(&mut cost, estimated_layer_information, grove_version) .map_err(Error::MerkError) } else { Ok(()) @@ -118,8 +183,8 @@ impl GroveDb { pub fn average_case_merk_insert_tree( key: &KeyInfo, flags: &Option, - is_sum_tree: bool, - in_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, propagate_if_input: Option<&EstimatedLayerInformation>, 
grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -138,15 +203,12 @@ impl GroveDb { let flags_len = flags.len() as u32; flags_len + flags_len.required_space() as u32 }); - let tree_cost_size = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost_size = tree_type.cost_size(); let value_len = tree_cost_size + flags_len; - add_cost_case_merk_insert_layered(&mut cost, key_len, value_len, in_tree_using_sums); + add_cost_case_merk_insert_layered(&mut cost, key_len, value_len, in_parent_tree_type); if let Some(input) = propagate_if_input { - add_average_case_merk_propagate(&mut cost, input).map_err(Error::MerkError) + add_average_case_merk_propagate(&mut cost, input, grove_version) + .map_err(Error::MerkError) } else { Ok(()) } @@ -156,7 +218,7 @@ impl GroveDb { /// Add average case for insertion into merk pub fn average_case_merk_delete_tree( key: &KeyInfo, - is_sum_tree: bool, + tree_type: TreeType, estimated_layer_information: &EstimatedLayerInformation, propagate: bool, grove_version: &GroveVersion, @@ -181,15 +243,11 @@ impl GroveDb { ) .map(|f| f + f.required_space() as u32) .unwrap_or_default(); - let tree_cost_size = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost_size = tree_type.cost_size(); let layer_extra_size = tree_cost_size + flags_size; add_average_case_merk_delete_layered(&mut cost, key_len, layer_extra_size); if propagate { - add_average_case_merk_propagate(&mut cost, estimated_layer_information) + add_average_case_merk_propagate(&mut cost, estimated_layer_information, grove_version) .map_err(Error::MerkError) } else { Ok(()) @@ -203,7 +261,7 @@ impl GroveDb { pub fn average_case_merk_insert_element( key: &KeyInfo, value: &Element, - in_tree_using_sums: bool, + in_tree_type: TreeType, propagate_for_level: Option<&EstimatedLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -218,29 +276,25 @@ impl GroveDb { let mut cost = 
OperationCost::default(); let key_len = key.max_length() as u32; - match value { - Element::Tree(_, flags) | Element::SumTree(_, _, flags) => { - let flags_len = flags.as_ref().map_or(0, |flags| { - let flags_len = flags.len() as u32; - flags_len + flags_len.required_space() as u32 - }); - let tree_cost_size = if value.is_sum_tree() { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; - let value_len = tree_cost_size + flags_len; - add_cost_case_merk_insert_layered(&mut cost, key_len, value_len, in_tree_using_sums) - } - _ => add_cost_case_merk_insert( + if let Some((flags, tree_type)) = value.tree_flags_and_type() { + let flags_len = flags.as_ref().map_or(0, |flags| { + let flags_len = flags.len() as u32; + flags_len + flags_len.required_space() as u32 + }); + let tree_cost_size = tree_type.cost_size(); + let value_len = tree_cost_size + flags_len; + add_cost_case_merk_insert_layered(&mut cost, key_len, value_len, in_tree_type) + } else { + add_cost_case_merk_insert( &mut cost, key_len, cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, - in_tree_using_sums, - ), - }; + in_tree_type, + ) + } if let Some(level) = propagate_for_level { - add_average_case_merk_propagate(&mut cost, level).map_err(Error::MerkError) + add_average_case_merk_propagate(&mut cost, level, grove_version) + .map_err(Error::MerkError) } else { Ok(()) } @@ -253,7 +307,7 @@ impl GroveDb { pub fn average_case_merk_replace_element( key: &KeyInfo, value: &Element, - in_tree_using_sums: bool, + in_tree_type: TreeType, propagate_for_level: Option<&EstimatedLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -269,23 +323,17 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { - Element::Tree(_, flags) | Element::SumTree(_, _, flags) => { + Element::Tree(_, flags) + | Element::SumTree(_, _, flags) + | Element::BigSumTree(_, _, flags) + | Element::CountTree(_, _, flags) => { let 
flags_len = flags.as_ref().map_or(0, |flags| { let flags_len = flags.len() as u32; flags_len + flags_len.required_space() as u32 }); - let tree_cost_size = if value.is_sum_tree() { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost_size = value.tree_type().unwrap().cost_size(); let value_len = tree_cost_size + flags_len; - add_cost_case_merk_replace_layered( - &mut cost, - key_len, - value_len, - in_tree_using_sums, - ) + add_cost_case_merk_replace_layered(&mut cost, key_len, value_len, in_tree_type) } Element::Item(_, flags) | Element::SumItem(_, flags) => { let flags_len = flags.as_ref().map_or(0, |flags| { @@ -299,22 +347,18 @@ impl GroveDb { cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32 }; let value_len = sum_item_cost_size + flags_len; - add_cost_case_merk_replace_same_size( - &mut cost, - key_len, - value_len, - in_tree_using_sums, - ) + add_cost_case_merk_replace_same_size(&mut cost, key_len, value_len, in_tree_type) } _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, - in_tree_using_sums, + in_tree_type, ), }; if let Some(level) = propagate_for_level { - add_average_case_merk_propagate(&mut cost, level).map_err(Error::MerkError) + add_average_case_merk_propagate(&mut cost, level, grove_version) + .map_err(Error::MerkError) } else { Ok(()) } @@ -328,7 +372,7 @@ impl GroveDb { key: &KeyInfo, value: &Element, change_in_bytes: i32, - in_tree_using_sums: bool, + in_tree_type: TreeType, propagate_for_level: Option<&EstimatedLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -359,7 +403,7 @@ impl GroveDb { key_len, value_len, change_in_bytes, - in_tree_using_sums, + in_tree_type, ) } _ => { @@ -368,7 +412,8 @@ impl GroveDb { } }; if let Some(level) = propagate_for_level { - add_average_case_merk_propagate(&mut cost, level).map_err(Error::MerkError) + add_average_case_merk_propagate(&mut 
cost, level, grove_version) + .map_err(Error::MerkError) } else { Ok(()) } @@ -397,12 +442,12 @@ impl GroveDb { &cost, estimated_layer_information .estimated_layer_sizes - .value_with_feature_and_flags_size() + .value_with_feature_and_flags_size(grove_version) .map_err(Error::MerkError) ); add_average_case_merk_delete(&mut cost, key_len, value_size); if propagate { - add_average_case_merk_propagate(&mut cost, estimated_layer_information) + add_average_case_merk_propagate(&mut cost, estimated_layer_information, grove_version) .map_err(Error::MerkError) } else { Ok(()) @@ -416,7 +461,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -431,7 +476,7 @@ impl GroveDb { let value_size = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ); cost.seek_count += 1; cost.storage_loaded_bytes += value_size as u64; @@ -445,8 +490,8 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_flags_size: u32, - is_sum_tree: bool, - in_parent_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -458,17 +503,13 @@ impl GroveDb { .add_average_case_has_raw_tree_cost ); - let estimated_element_size = if is_sum_tree { - SUM_TREE_COST_SIZE + estimated_flags_size - } else { - TREE_COST_SIZE + estimated_flags_size - }; + let estimated_element_size = tree_type.cost_size() + estimated_flags_size; Self::add_average_case_has_raw_cost::( cost, path, key, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, grove_version, ) } @@ -479,7 +520,7 @@ impl GroveDb { _path: &KeyInfoPath, key: &KeyInfo, estimated_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: 
TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -496,7 +537,7 @@ impl GroveDb { cost, key.max_length() as u32, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ) .map_err(Error::MerkError) } @@ -507,8 +548,8 @@ impl GroveDb { _path: &KeyInfoPath, key: &KeyInfo, estimated_flags_size: u32, - is_sum_tree: bool, - in_parent_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -520,17 +561,13 @@ impl GroveDb { .add_average_case_get_raw_tree_cost ); - let estimated_element_size = if is_sum_tree { - SUM_TREE_COST_SIZE + estimated_flags_size - } else { - TREE_COST_SIZE + estimated_flags_size - }; + let estimated_element_size = tree_type.cost_size() + estimated_flags_size; cost.seek_count += 1; add_average_case_get_merk_node( cost, key.max_length() as u32, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ) .map_err(Error::MerkError) } @@ -541,7 +578,7 @@ impl GroveDb { cost: &mut OperationCost, path: &KeyInfoPath, key: &KeyInfo, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, estimated_element_size: u32, estimated_references_sizes: Vec, grove_version: &GroveVersion, @@ -559,7 +596,7 @@ impl GroveDb { let value_size: u32 = TreeNode::average_case_encoded_tree_size( key.max_length() as u32, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ); cost.seek_count += 1 + estimated_references_sizes.len() as u32; cost.storage_loaded_bytes += value_size as u64 @@ -579,7 +616,7 @@ mod test { use grovedb_costs::OperationCost; use grovedb_merk::{ estimated_costs::average_case_costs::add_average_case_get_merk_node, - test_utils::make_batch_seq, tree::kv::ValueDefinedCostType, Merk, + test_utils::make_batch_seq, tree::kv::ValueDefinedCostType, tree_type::TreeType, Merk, }; use grovedb_storage::{ 
rocksdb_storage::RocksDbStorage, worst_case_costs::WorstKeyLength, Storage, StorageBatch, @@ -606,7 +643,7 @@ mod test { storage .get_storage_context(EMPTY_PATH, Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -626,7 +663,7 @@ mod test { // Reopen merk: this time, only root node is loaded to memory let merk = Merk::open_base( storage.get_storage_context(EMPTY_PATH, None).unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -650,8 +687,13 @@ mod test { // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_average_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) - .expect("expected to add cost"); + add_average_case_get_merk_node( + &mut cost, + key.max_length() as u32, + 60, + TreeType::NormalTree.inner_node_type(), + ) + .expect("expected to add cost"); assert_eq!(cost, node_result.cost); } @@ -716,7 +758,7 @@ mod test { &path, &key, elem.serialized_size(grove_version).expect("expected size") as u32, - false, + TreeType::NormalTree, GroveVersion::latest(), ) .expect("expected to add cost"); diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index 4c409b99..c6f889f9 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -15,6 +15,7 @@ use grovedb_merk::{ }, }, tree::TreeNode, + tree_type::TreeType, HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; @@ -25,7 +26,7 @@ use integer_encoding::VarInt; use crate::{ batch::{key_info::KeyInfo, KeyInfoPath}, - element::{SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, + element::{CostSize, SUM_ITEM_COST_SIZE, SUM_TREE_COST_SIZE, TREE_COST_SIZE}, Element, ElementFlags, Error, GroveDb, }; @@ -36,7 +37,7 @@ impl GroveDb { pub fn 
add_worst_case_get_merk_at_path<'db, S: Storage<'db>>( cost: &mut OperationCost, path: &KeyInfoPath, - is_sum_tree: bool, + tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -55,7 +56,7 @@ impl GroveDb { cost.storage_loaded_bytes += TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, HASH_LENGTH as u32, - is_sum_tree, + tree_type.inner_node_type(), // todo This is probably wrong ) as u64; } } @@ -66,8 +67,8 @@ impl GroveDb { /// Add worst case for insertion into merk pub(crate) fn worst_case_merk_replace_tree( key: &KeyInfo, - is_sum_tree: bool, - is_in_parent_sum_tree: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, grove_version: &GroveVersion, @@ -83,17 +84,13 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; - let tree_cost = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost = tree_type.cost_size(); let layer_extra_size = tree_cost + WORST_CASE_FLAGS_LEN; add_worst_case_merk_replace_layered( &mut cost, key_len, layer_extra_size, - is_in_parent_sum_tree, + in_parent_tree_type.inner_node_type(), ); if propagate { add_worst_case_merk_propagate(&mut cost, worst_case_layer_information) @@ -108,8 +105,8 @@ impl GroveDb { pub fn worst_case_merk_insert_tree( key: &KeyInfo, flags: &Option, - is_sum_tree: bool, - is_in_parent_sum_tree: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, propagate_if_input: Option<&WorstCaseLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -128,13 +125,9 @@ impl GroveDb { let flags_len = flags.len() as u32; flags_len + flags_len.required_space() as u32 }); - let tree_cost = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost = tree_type.cost_size(); let value_len = tree_cost + flags_len; - add_cost_case_merk_insert_layered(&mut cost, 
key_len, value_len, is_in_parent_sum_tree); + add_cost_case_merk_insert_layered(&mut cost, key_len, value_len, in_parent_tree_type); if let Some(input) = propagate_if_input { add_worst_case_merk_propagate(&mut cost, input).map_err(Error::MerkError) } else { @@ -146,7 +139,7 @@ impl GroveDb { /// Add worst case for insertion into merk pub fn worst_case_merk_delete_tree( key: &KeyInfo, - is_sum_tree: bool, + tree_type: TreeType, worst_case_layer_information: &WorstCaseLayerInformation, propagate: bool, grove_version: &GroveVersion, @@ -162,11 +155,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; - let tree_cost = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost = tree_type.cost_size(); let layer_extra_size = tree_cost + WORST_CASE_FLAGS_LEN; add_worst_case_merk_delete_layered(&mut cost, key_len, layer_extra_size); if propagate { @@ -184,7 +173,7 @@ impl GroveDb { pub fn worst_case_merk_insert_element( key: &KeyInfo, value: &Element, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, propagate_for_level: Option<&WorstCaseLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -200,29 +189,28 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; match value { - Element::Tree(_, flags) | Element::SumTree(_, _, flags) => { + Element::Tree(_, flags) + | Element::SumTree(_, _, flags) + | Element::BigSumTree(_, _, flags) + | Element::CountTree(_, _, flags) => { let flags_len = flags.as_ref().map_or(0, |flags| { let flags_len = flags.len() as u32; flags_len + flags_len.required_space() as u32 }); - let tree_cost_size = if value.is_sum_tree() { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost_size = value.tree_type().unwrap().cost_size(); let value_len = tree_cost_size + flags_len; add_cost_case_merk_insert_layered( &mut cost, key_len, value_len, - in_parent_tree_using_sums, + 
in_parent_tree_type, ) } _ => add_cost_case_merk_insert( &mut cost, key_len, cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, - in_parent_tree_using_sums, + in_parent_tree_type, ), }; if let Some(level) = propagate_for_level { @@ -239,7 +227,7 @@ impl GroveDb { pub fn worst_case_merk_replace_element( key: &KeyInfo, value: &Element, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, propagate_for_level: Option<&WorstCaseLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -270,7 +258,7 @@ impl GroveDb { &mut cost, key_len, value_len, - in_parent_tree_using_sums, + in_parent_tree_type, ) } Element::SumItem(_, flags) => { @@ -283,14 +271,14 @@ impl GroveDb { &mut cost, key_len, value_len, - in_parent_tree_using_sums, + in_parent_tree_type, ) } _ => add_cost_case_merk_replace( &mut cost, key_len, cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, - in_parent_tree_using_sums, + in_parent_tree_type, ), }; if let Some(level) = propagate_for_level { @@ -308,7 +296,7 @@ impl GroveDb { key: &KeyInfo, value: &Element, change_in_bytes: i32, - in_tree_using_sums: bool, + in_parent_tree_type: TreeType, propagate_for_level: Option<&WorstCaseLayerInformation>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -339,7 +327,7 @@ impl GroveDb { key_len, value_len, change_in_bytes, - in_tree_using_sums, + in_parent_tree_type, ) } _ => { @@ -389,7 +377,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, max_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -404,7 +392,7 @@ impl GroveDb { let value_size = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ); cost.seek_count += 1; cost.storage_loaded_bytes += value_size as u64; @@ -417,8 +405,8 @@ impl 
GroveDb { cost: &mut OperationCost, _path: &KeyInfoPath, key: &KeyInfo, - is_sum_tree: bool, - in_parent_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -431,16 +419,12 @@ impl GroveDb { ); cost.seek_count += 1; - let tree_cost_size = if is_sum_tree { - SUM_TREE_COST_SIZE - } else { - TREE_COST_SIZE - }; + let tree_cost_size = tree_type.cost_size(); add_worst_case_get_merk_node( cost, key.max_length() as u32, tree_cost_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ) .map_err(Error::MerkError) } @@ -451,7 +435,7 @@ impl GroveDb { _path: &KeyInfoPath, key: &KeyInfo, max_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result<(), Error> { check_grovedb_v0!( @@ -468,7 +452,7 @@ impl GroveDb { cost, key.max_length() as u32, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ) .map_err(Error::MerkError) } @@ -479,7 +463,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, max_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, max_references_sizes: Vec, grove_version: &GroveVersion, ) -> Result<(), Error> { @@ -496,7 +480,7 @@ impl GroveDb { let value_size: u32 = TreeNode::worst_case_encoded_tree_size( key.max_length() as u32, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type.inner_node_type(), ); cost.seek_count += 1 + max_references_sizes.len() as u32; cost.storage_loaded_bytes += @@ -513,8 +497,10 @@ mod test { use grovedb_costs::OperationCost; use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_get_merk_node, + merk::NodeType, test_utils::{empty_path_merk, empty_path_merk_read_only, make_batch_seq}, tree::kv::ValueDefinedCostType, + tree_type::TreeType, }; use grovedb_storage::{ rocksdb_storage::{test_utils::TempStorage, RocksDbStorage}, @@ -569,7 
+555,7 @@ mod test { // (this will be the max_element_size) let mut cost = OperationCost::default(); let key = KnownKey(8_u64.to_be_bytes().to_vec()); - add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, false) + add_worst_case_get_merk_node(&mut cost, key.max_length() as u32, 60, NodeType::NormalNode) .expect("no issue with version"); assert_eq!(cost, node_result.cost); } @@ -635,7 +621,7 @@ mod test { &path, &key, elem.serialized_size(grove_version).expect("expected size") as u32, - false, + TreeType::NormalTree, GroveVersion::latest(), ) .expect("expected to add cost"); diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 8e0b088a..7b4e9c00 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -161,7 +161,7 @@ use std::{collections::HashMap, option::Option::None, path::Path}; use debugger::start_visualizer; #[cfg(any(feature = "minimal", feature = "verify"))] pub use element::Element; -#[cfg(feature = "minimal")] +#[cfg(any(feature = "minimal", feature = "verify"))] pub use element::ElementFlags; #[cfg(feature = "minimal")] use grovedb_costs::{ @@ -181,6 +181,10 @@ pub use grovedb_merk::proofs::Query; #[cfg(feature = "minimal")] use grovedb_merk::tree::kv::ValueDefinedCostType; #[cfg(feature = "minimal")] +pub use grovedb_merk::tree::AggregateData; +#[cfg(any(feature = "minimal", feature = "verify"))] +pub use grovedb_merk::tree_type::{MaybeTree, TreeType}; +#[cfg(feature = "minimal")] use grovedb_merk::{ self, tree::{combine_hash, value_hash}, @@ -256,7 +260,7 @@ type VerificationIssues = HashMap>, (CryptoHash, CryptoHash, CryptoH type OpenedMerkForReplication<'tx> = ( Merk>, Option>, - bool, + TreeType, ); #[cfg(feature = "minimal")] @@ -318,12 +322,11 @@ impl GroveDb { } ) ); - let is_sum_tree = element.is_sum_tree(); - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -340,7 +343,7 @@ impl GroveDb { } else { Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -353,7 +356,7 @@ impl GroveDb { &'db self, prefix: SubtreePrefix, root_key: Option>, - is_sum_tree: bool, + tree_type: TreeType, tx: &'db Transaction, batch: Option<&'db StorageBatch>, grove_version: &GroveVersion, @@ -367,7 +370,7 @@ impl GroveDb { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -380,7 +383,7 @@ impl GroveDb { } else { Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -421,13 +424,12 @@ impl GroveDb { )) }) .unwrap()?; - let is_sum_tree = element.is_sum_tree(); - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) 
= element { + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { Ok(( Merk::open_layered_with_root_key( storage, root_key.clone(), - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -436,7 +438,7 @@ impl GroveDb { }) .unwrap()?, root_key, - is_sum_tree, + tree_type, )) } else { Err(Error::CorruptedPath( @@ -447,14 +449,14 @@ impl GroveDb { Ok(( Merk::open_base( storage, - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) .unwrap()?, None, - false, + TreeType::NormalTree, )) } } @@ -494,12 +496,11 @@ impl GroveDb { } ) ); - let is_sum_tree = element.is_sum_tree(); - if let Element::Tree(root_key, _) | Element::SumTree(root_key, ..) = element { + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -516,7 +517,7 @@ impl GroveDb { } else { Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -529,7 +530,7 @@ impl GroveDb { &'db self, prefix: SubtreePrefix, root_key: Option>, - is_sum_tree: bool, + tree_type: TreeType, batch: Option<&'db StorageBatch>, grove_version: &GroveVersion, ) -> CostResult>, Error> { @@ -542,7 +543,7 @@ impl GroveDb { Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -555,7 +556,7 @@ impl GroveDb { } else { Merk::open_base( storage, - false, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -651,9 +652,11 @@ impl GroveDb { grove_version, ) ); - let (root_hash, root_key, sum) = cost_return_on_error!( + let (root_hash, root_key, 
aggregate_data) = cost_return_on_error!( &mut cost, - child_tree.root_hash_key_and_sum().map_err(Error::MerkError) + child_tree + .root_hash_key_and_aggregate_data() + .map_err(Error::MerkError) ); cost_return_on_error!( &mut cost, @@ -662,7 +665,7 @@ impl GroveDb { parent_key, root_key, root_hash, - sum, + aggregate_data, grove_version, ) ); @@ -705,9 +708,11 @@ impl GroveDb { grove_version ) ); - let (root_hash, root_key, sum) = cost_return_on_error!( + let (root_hash, root_key, aggregate_data) = cost_return_on_error!( &mut cost, - child_tree.root_hash_key_and_sum().map_err(Error::MerkError) + child_tree + .root_hash_key_and_aggregate_data() + .map_err(Error::MerkError) ); cost_return_on_error!( &mut cost, @@ -716,7 +721,7 @@ impl GroveDb { parent_key, root_key, root_hash, - sum, + aggregate_data, grove_version, ) ); @@ -758,7 +763,9 @@ impl GroveDb { ); let (root_hash, root_key, sum) = cost_return_on_error!( &mut cost, - child_tree.root_hash_key_and_sum().map_err(Error::MerkError) + child_tree + .root_hash_key_and_aggregate_data() + .map_err(Error::MerkError) ); cost_return_on_error!( &mut cost, @@ -783,7 +790,7 @@ impl GroveDb { key: K, maybe_root_key: Option>, root_tree_hash: Hash, - sum: Option, + aggregate_data: AggregateData, grove_version: &GroveVersion, ) -> CostResult<(), Error> { let key_ref = key.as_ref(); @@ -795,7 +802,47 @@ impl GroveDb { } else if let Element::SumTree(.., flag) = element { let tree = Element::new_sum_tree_with_flags_and_sum_value( maybe_root_key, - sum.unwrap_or_default(), + aggregate_data.as_sum_i64(), + flag, + ); + tree.insert_subtree( + parent_tree, + key.as_ref(), + root_tree_hash, + None, + grove_version, + ) + } else if let Element::BigSumTree(.., flag) = element { + let tree = Element::new_big_sum_tree_with_flags_and_sum_value( + maybe_root_key, + aggregate_data.as_summed_i128(), + flag, + ); + tree.insert_subtree( + parent_tree, + key.as_ref(), + root_tree_hash, + None, + grove_version, + ) + } else if let 
Element::CountTree(.., flag) = element { + let tree = Element::new_count_tree_with_flags_and_count_value( + maybe_root_key, + aggregate_data.as_count_u64(), + flag, + ); + tree.insert_subtree( + parent_tree, + key.as_ref(), + root_tree_hash, + None, + grove_version, + ) + } else if let Element::CountSumTree(.., flag) = element { + let tree = Element::new_count_sum_tree_with_flags_and_sum_and_count_value( + maybe_root_key, + aggregate_data.as_count_u64(), + aggregate_data.as_sum_i64(), flag, ); tree.insert_subtree( @@ -825,7 +872,7 @@ impl GroveDb { key: K, maybe_root_key: Option>, root_tree_hash: Hash, - sum: Option, + aggregate_data: AggregateData, batch_operations: &mut Vec>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { @@ -836,7 +883,7 @@ impl GroveDb { let tree = Element::new_tree_with_flags(maybe_root_key, flag); let merk_feature_type = cost_return_on_error!( &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) + tree.get_feature_type(parent_tree.tree_type) .wrap_with_cost(OperationCost::default()) ); tree.insert_subtree_into_batch_operations( @@ -850,12 +897,70 @@ impl GroveDb { } else if let Element::SumTree(.., flag) = element { let tree = Element::new_sum_tree_with_flags_and_sum_value( maybe_root_key, - sum.unwrap_or_default(), + aggregate_data.as_sum_i64(), + flag, + ); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.tree_type) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else if let Element::BigSumTree(.., flag) = element { + let tree = Element::new_big_sum_tree_with_flags_and_sum_value( + maybe_root_key, + aggregate_data.as_summed_i128(), + flag, + ); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.tree_type) + .wrap_with_cost(OperationCost::default()) + ); + 
tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else if let Element::CountTree(.., flag) = element { + let tree = Element::new_count_tree_with_flags_and_count_value( + maybe_root_key, + aggregate_data.as_count_u64(), + flag, + ); + let merk_feature_type = cost_return_on_error!( + &mut cost, + tree.get_feature_type(parent_tree.tree_type) + .wrap_with_cost(OperationCost::default()) + ); + tree.insert_subtree_into_batch_operations( + key, + root_tree_hash, + true, + batch_operations, + merk_feature_type, + grove_version, + ) + } else if let Element::CountSumTree(.., flag) = element { + let tree = Element::new_count_sum_tree_with_flags_and_sum_and_count_value( + maybe_root_key, + aggregate_data.as_count_u64(), + aggregate_data.as_sum_i64(), flag, ); let merk_feature_type = cost_return_on_error!( &mut cost, - tree.get_feature_type(parent_tree.is_sum_tree) + tree.get_feature_type(parent_tree.tree_type) .wrap_with_cost(OperationCost::default()) ); tree.insert_subtree_into_batch_operations( @@ -1095,7 +1200,11 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { let element = raw_decode(&element_value, grove_version)?; match element { - Element::SumTree(..) | Element::Tree(..) => { + Element::SumTree(..) + | Element::Tree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, @@ -1239,7 +1348,11 @@ impl GroveDb { while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { let element = raw_decode(&element_value, grove_version)?; match element { - Element::SumTree(..) | Element::Tree(..) => { + Element::SumTree(..) + | Element::Tree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) 
=> { let (kv_value, element_value_hash) = merk .get_value_and_value_hash( &key, diff --git a/grovedb/src/operations/delete/average_case.rs b/grovedb/src/operations/delete/average_case.rs index b828ce0c..6e8b0158 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -8,6 +8,7 @@ use grovedb_merk::{ average_case_costs::EstimatedLayerInformation, worst_case_costs::add_average_case_cost_for_is_empty_tree_except, }, + tree_type::TreeType, HASH_LENGTH_U32, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; @@ -65,7 +66,7 @@ impl GroveDb { except_keys_count, key_len, estimated_element_size, - is_sum_tree, + tree_type, ) = cost_return_on_error_no_add!( &cost, if height == path_len - 1 { @@ -74,7 +75,7 @@ impl GroveDb { &cost, layer_info .estimated_layer_sizes - .value_with_feature_and_flags_size() + .value_with_feature_and_flags_size(grove_version) .map_err(Error::MerkError) ); Ok(( @@ -84,7 +85,7 @@ impl GroveDb { 0, key.max_length() as u32, estimated_value_len, - layer_info.is_sum_tree, + layer_info.tree_type, )) } else { Err(Error::InvalidParameter( @@ -99,7 +100,7 @@ impl GroveDb { &cost, layer_info .estimated_layer_sizes - .subtree_with_feature_and_flags_size() + .subtree_with_feature_and_flags_size(grove_version) .map_err(Error::MerkError) ); Ok(( @@ -109,7 +110,7 @@ impl GroveDb { 1, last_key.max_length() as u32, estimated_value_len, - layer_info.is_sum_tree, + layer_info.tree_type, )) } else { Err(Error::InvalidParameter("intermediate layer info missing")) @@ -121,7 +122,7 @@ impl GroveDb { Self::average_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, - is_sum_tree, + tree_type, validate, check_if_tree, except_keys_count, @@ -139,7 +140,7 @@ impl GroveDb { pub fn average_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, - parent_tree_is_sum_tree: bool, + in_parent_tree_type: TreeType, validate: 
bool, check_if_tree: bool, except_keys_count: u16, @@ -163,7 +164,7 @@ impl GroveDb { &mut cost, path, false, - parent_tree_is_sum_tree, + in_parent_tree_type, grove_version, ) ); @@ -176,7 +177,7 @@ impl GroveDb { path, key, estimated_key_element_size.1, - parent_tree_is_sum_tree, + in_parent_tree_type, grove_version, ) ); diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 2255f29d..2b167374 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -5,6 +5,7 @@ use grovedb_costs::{ storage_cost::removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, CostResult, CostsExt, OperationCost, }; +use grovedb_merk::MaybeTree; use grovedb_path::SubtreePath; use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, @@ -169,7 +170,7 @@ impl GroveDb { path: SubtreePath, key: &[u8], options: &DeleteUpTreeOptions, - is_known_to_be_subtree_with_sum: Option<(bool, bool)>, + is_known_to_be_subtree: Option, mut current_batch_operations: Vec, transaction: TransactionArg, grove_version: &GroveVersion, @@ -186,7 +187,7 @@ impl GroveDb { path, key, options, - is_known_to_be_subtree_with_sum, + is_known_to_be_subtree, &mut current_batch_operations, transaction, grove_version, @@ -201,7 +202,7 @@ impl GroveDb { path: SubtreePath, key: &[u8], options: &DeleteUpTreeOptions, - is_known_to_be_subtree_with_sum: Option<(bool, bool)>, + is_known_to_be_subtree: Option, current_batch_operations: &mut Vec, transaction: TransactionArg, grove_version: &GroveVersion, @@ -234,7 +235,7 @@ impl GroveDb { path.clone(), key, &options.to_delete_options(), - is_known_to_be_subtree_with_sum, + is_known_to_be_subtree, current_batch_operations, transaction, grove_version, diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 9e24ad3e..8bf2e0ea 100644 --- 
a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -18,7 +18,8 @@ use grovedb_costs::{ storage_cost::removal::{StorageRemovedBytes, StorageRemovedBytes::BasicStorageRemoval}, CostResult, CostsExt, OperationCost, }; -use grovedb_merk::{proofs::Query, KVIterator}; +#[cfg(feature = "minimal")] +use grovedb_merk::{proofs::Query, KVIterator, MaybeTree}; #[cfg(feature = "minimal")] use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; @@ -511,7 +512,7 @@ impl GroveDb { path: SubtreePath, key: &[u8], options: &DeleteOptions, - is_known_to_be_subtree_with_sum: Option<(bool, bool)>, + is_known_to_be_subtree: Option, current_batch_operations: &[QualifiedGroveDbOp], transaction: TransactionArg, grove_version: &GroveVersion, @@ -544,28 +545,24 @@ impl GroveDb { ) ); } - let (is_subtree, is_subtree_with_sum) = match is_known_to_be_subtree_with_sum { + let tree_type = match is_known_to_be_subtree { None => { let element = cost_return_on_error!( &mut cost, self.get_raw(path.clone(), key.as_ref(), transaction, grove_version) ); - match element { - Element::Tree(..) => (true, false), - Element::SumTree(..) 
=> (true, true), - _ => (false, false), - } + element.maybe_tree_type() } Some(x) => x, }; - if is_subtree { + if let MaybeTree::Tree(tree_type) = tree_type { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_merk_path_vec = subtree_merk_path.to_vec(); let batch_deleted_keys = current_batch_operations .iter() .filter_map(|op| match op.op { - GroveOp::Delete | GroveOp::DeleteTree | GroveOp::DeleteSumTree => { + GroveOp::Delete | GroveOp::DeleteTree(_) => { // todo: to_path clones (best to figure out how to compare without // cloning) if op.path.to_path() == subtree_merk_path_vec { @@ -595,7 +592,7 @@ impl GroveDb { // If there is any current batch operation that is inserting something in this // tree then it is not empty either is_empty &= !current_batch_operations.iter().any(|op| match op.op { - GroveOp::Delete | GroveOp::DeleteTree | GroveOp::DeleteSumTree => false, + GroveOp::Delete | GroveOp::DeleteTree(_) => false, // todo: fix for to_path (it clones) _ => op.path.to_path() == subtree_merk_path_vec, }); @@ -613,7 +610,7 @@ impl GroveDb { Ok(Some(QualifiedGroveDbOp::delete_tree_op( path.to_vec(), key.to_vec(), - is_subtree_with_sum, + tree_type, ))) } else { Err(Error::NotSupported( @@ -711,8 +708,8 @@ impl GroveDb { grove_version ) ); - let uses_sum_tree = subtree_to_delete_from.is_sum_tree; - if element.is_any_tree() { + let uses_sum_tree = subtree_to_delete_from.tree_type; + if let Some(tree_type) = element.tree_type() { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_merk_path_ref = SubtreePath::from(&subtree_merk_path); @@ -771,7 +768,7 @@ impl GroveDb { Merk::open_layered_with_root_key( storage, subtree_to_delete_from.root_key(), - element.is_sum_tree(), + tree_type, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -905,7 +902,7 @@ impl GroveDb { &mut cost, self.open_non_transactional_merk_at_path(path.clone(), Some(batch), grove_version) ); - let uses_sum_tree = 
subtree_to_delete_from.is_sum_tree; + let uses_sum_tree = subtree_to_delete_from.tree_type; if element.is_any_tree() { let subtree_merk_path = path.derive_owned_with_child(key); let subtree_of_tree_we_are_deleting = cost_return_on_error!( diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index effcb5fe..89454149 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -5,6 +5,7 @@ use grovedb_costs::{ }; use grovedb_merk::{ estimated_costs::worst_case_costs::add_worst_case_cost_for_is_empty_tree_except, tree::kv::KV, + tree_type::TreeType, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; use grovedb_version::{ @@ -26,7 +27,7 @@ impl GroveDb { key: &KeyInfo, stop_path_height: Option, validate: bool, - intermediate_tree_info: IntMap, + intermediate_tree_info: IntMap, max_element_size: u32, grove_version: &GroveVersion, ) -> CostResult, Error> { @@ -59,13 +60,12 @@ impl GroveDb { check_if_tree, except_keys_count, max_element_size, - is_sum_tree, + tree_type, ) = cost_return_on_error_no_add!( &cost, if height == path_len { - if let Some((is_in_sum_tree, _)) = intermediate_tree_info.get(height as u64) - { - Ok((used_path, key, true, 0, max_element_size, *is_in_sum_tree)) + if let Some((tree_type, _)) = intermediate_tree_info.get(height as u64) { + Ok((used_path, key, true, 0, max_element_size, *tree_type)) } else { Err(Error::InvalidParameter( "intermediate flag size missing for height at path length", @@ -74,25 +74,19 @@ impl GroveDb { } else { let (last_key, smaller_path) = used_path.split_last().unwrap(); used_path = smaller_path; - if let Some((is_in_sum_tree, flags_size_at_level)) = + if let Some((tree_type, flags_size_at_level)) = intermediate_tree_info.get(height as u64) { // the worst case is that we are only in sum trees + // Todo the worst case is actually now big sum trees let value_len = SUM_TREE_COST_SIZE + flags_size_at_level; let 
max_tree_size = KV::layered_node_byte_cost_size_for_key_and_value_lengths( last_key.max_length() as u32, value_len, - *is_in_sum_tree, + tree_type.inner_node_type(), ); - Ok(( - used_path, - last_key, - false, - 1, - max_tree_size, - *is_in_sum_tree, - )) + Ok((used_path, last_key, false, 1, max_tree_size, *tree_type)) } else { Err(Error::InvalidParameter("intermediate flag size missing")) } @@ -103,7 +97,7 @@ impl GroveDb { Self::worst_case_delete_operation_for_delete::( &KeyInfoPath::from_vec(path_at_level.to_vec()), key_at_level, - is_sum_tree, + tree_type, validate, check_if_tree, except_keys_count, @@ -121,7 +115,7 @@ impl GroveDb { pub fn worst_case_delete_operation_for_delete<'db, S: Storage<'db>>( path: &KeyInfoPath, key: &KeyInfo, - parent_tree_is_sum_tree: bool, + in_parent_tree_type: TreeType, validate: bool, check_if_tree: bool, except_keys_count: u16, @@ -144,7 +138,7 @@ impl GroveDb { GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, - parent_tree_is_sum_tree, + in_parent_tree_type, grove_version, ) ); @@ -157,7 +151,7 @@ impl GroveDb { path, key, max_element_size, - parent_tree_is_sum_tree, + in_parent_tree_type, grove_version, ) ); diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index 0cb44462..d920ad01 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -3,6 +3,8 @@ #[cfg(feature = "minimal")] use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] +use grovedb_merk::tree_type::TreeType; +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; @@ -21,7 +23,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -38,7 +40,7 @@ impl GroveDb { path, key, 
estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, grove_version, )?; Ok(cost) @@ -50,8 +52,8 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_flags_size: u32, - is_sum_tree: bool, - in_parent_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -68,8 +70,8 @@ impl GroveDb { path, key, estimated_flags_size, - is_sum_tree, - in_parent_tree_using_sums, + tree_type, + in_parent_tree_type, grove_version, )?; Ok(cost) @@ -81,7 +83,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -98,7 +100,7 @@ impl GroveDb { path, key, estimated_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, grove_version, )?; Ok(cost) @@ -108,7 +110,7 @@ impl GroveDb { pub fn average_case_for_get( path: &KeyInfoPath, key: &KeyInfo, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, estimated_element_size: u32, estimated_references_sizes: Vec, grove_version: &GroveVersion, @@ -126,7 +128,7 @@ impl GroveDb { &mut cost, path, key, - in_parent_tree_using_sums, + in_parent_tree_type, estimated_element_size, estimated_references_sizes, grove_version, @@ -139,8 +141,8 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, estimated_flags_size: u32, - is_sum_tree: bool, - in_parent_tree_using_sums: bool, + tree_type: TreeType, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -157,8 +159,8 @@ impl GroveDb { path, key, estimated_flags_size, - is_sum_tree, - in_parent_tree_using_sums, + tree_type, + in_parent_tree_type, grove_version, )?; Ok(cost) diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index 4b1d0ccd..d52fe4b1 100644 --- a/grovedb/src/operations/get/mod.rs +++ 
b/grovedb/src/operations/get/mod.rs @@ -22,6 +22,8 @@ use grovedb_version::{ check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, }; +#[cfg(feature = "minimal")] +use crate::error::GroveDbErrorExt; #[cfg(feature = "minimal")] use crate::{ reference_path::{path_from_reference_path_type, path_from_reference_qualified_path_type}, @@ -295,7 +297,7 @@ impl GroveDb { let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_transactional_merk_at_path(path, transaction, None, grove_version) + self.open_transactional_merk_at_path(path.clone(), transaction, None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -304,7 +306,9 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version) + .add_context(format!("path is {}", path)) + .add_cost(cost) } /// Get tree item without following references @@ -353,7 +357,7 @@ impl GroveDb { let merk_to_get_from = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path, None, grove_version) + self.open_non_transactional_merk_at_path(path.clone(), None, grove_version) .map_err(|e| match e { Error::InvalidParentLayerPath(s) => { Error::PathParentLayerNotFound(s) @@ -362,7 +366,9 @@ impl GroveDb { }) ); - Element::get(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) + Element::get(&merk_to_get_from, key, allow_cache, grove_version) + .add_context(format!("path is {}", path)) + .add_cost(cost) } /// Get tree item without following references @@ -445,6 +451,7 @@ impl GroveDb { ); Element::get(&merk_to_get_from, parent_key, true, grove_version) + .add_context(format!("path is {}", path)) } else { let merk_to_get_from = cost_return_on_error!( &mut cost, @@ -452,10 +459,15 @@ impl GroveDb { ); Element::get(&merk_to_get_from, parent_key, true, grove_version) + .add_context(format!("path is {}", 
path)) } .unwrap_add_cost(&mut cost); match element { - Ok(Element::Tree(..)) | Ok(Element::SumTree(..)) => Ok(()).wrap_with_cost(cost), + Ok(Element::Tree(..)) + | Ok(Element::SumTree(..)) + | Ok(Element::BigSumTree(..)) + | Ok(Element::CountTree(..)) + | Ok(Element::CountSumTree(..)) => Ok(()).wrap_with_cost(cost), Ok(_) | Err(Error::PathKeyNotFound(_)) => Err(error_fn()).wrap_with_cost(cost), Err(e) => Err(e).wrap_with_cost(cost), } diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index 0e97f7fb..c1b75468 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -14,7 +14,8 @@ use integer_encoding::VarInt; #[cfg(feature = "minimal")] use crate::element::SumValue; use crate::{ - element::QueryOptions, operations::proof::ProveOptions, + element::{BigSumValue, CountValue, QueryOptions}, + operations::proof::ProveOptions, query_result_type::PathKeyOptionalElementTrio, }; #[cfg(feature = "minimal")] @@ -32,6 +33,12 @@ pub enum QueryItemOrSumReturnType { ItemData(Vec), /// A sum item or a sum tree value SumValue(SumValue), + /// A big sum tree value + BigSumValue(BigSumValue), + /// A count value + CountValue(CountValue), + /// A count and sum value + CountSumValue(CountValue, SumValue), } #[cfg(feature = "minimal")] @@ -222,7 +229,12 @@ where { )), } } - Element::Item(..) | Element::SumItem(..) | Element::SumTree(..) => Ok(element), + Element::Item(..) + | Element::SumItem(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) => Ok(element), Element::Tree(..) => Err(Error::InvalidQuery("path_queries can not refer to trees")), } } @@ -341,7 +353,11 @@ where { } Element::Item(item, _) => Ok(item), Element::SumItem(item, _) => Ok(item.encode_var_vec()), - Element::Tree(..) | Element::SumTree(..) => Err(Error::InvalidQuery( + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) 
+ | Element::CountSumTree(..) => Err(Error::InvalidQuery( "path_queries can only refer to items and references", )), } @@ -422,6 +438,18 @@ where { Element::SumTree(_, sum_value, _) => { Ok(QueryItemOrSumReturnType::SumValue(sum_value)) } + Element::BigSumTree(_, big_sum_value, _) => { + Ok(QueryItemOrSumReturnType::BigSumValue(big_sum_value)) + } + Element::CountTree(_, count_value, _) => { + Ok(QueryItemOrSumReturnType::CountValue(count_value)) + } + Element::CountSumTree(_, count_value, sum_value, _) => { + Ok(QueryItemOrSumReturnType::CountSumValue( + count_value, + sum_value, + )) + } _ => Err(Error::InvalidQuery( "the reference must result in an item", )), @@ -439,6 +467,15 @@ where { Element::SumTree(_, sum_value, _) => { Ok(QueryItemOrSumReturnType::SumValue(sum_value)) } + Element::BigSumTree(_, big_sum_value, _) => { + Ok(QueryItemOrSumReturnType::BigSumValue(big_sum_value)) + } + Element::CountTree(_, count_value, _) => { + Ok(QueryItemOrSumReturnType::CountValue(count_value)) + } + Element::CountSumTree(_, count_value, sum_value, _) => Ok( + QueryItemOrSumReturnType::CountSumValue(count_value, sum_value), + ), Element::Tree(..) => Err(Error::InvalidQuery( "path_queries can only refer to items, sum items, references and sum \ trees", @@ -520,12 +557,15 @@ where { } } Element::SumItem(item, _) => Ok(item), - Element::Tree(..) | Element::SumTree(..) | Element::Item(..) => { - Err(Error::InvalidQuery( - "path_queries over sum items can only refer to sum items and \ - references", - )) - } + Element::Tree(..) + | Element::SumTree(..) + | Element::BigSumTree(..) + | Element::CountTree(..) + | Element::CountSumTree(..) + | Element::Item(..) 
=> Err(Error::InvalidQuery( + "path_queries over sum items can only refer to sum items and \ + references", + )), } } _ => Err(Error::CorruptedCodeExecution( diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index 591d6150..e6382dd8 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ b/grovedb/src/operations/get/worst_case.rs @@ -3,6 +3,8 @@ #[cfg(feature = "minimal")] use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] +use grovedb_merk::tree_type::TreeType; +#[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; @@ -20,7 +22,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, max_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -37,7 +39,7 @@ impl GroveDb { path, key, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, grove_version, )?; Ok(cost) @@ -48,7 +50,7 @@ impl GroveDb { path: &KeyInfoPath, key: &KeyInfo, max_element_size: u32, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -65,7 +67,7 @@ impl GroveDb { path, key, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, grove_version, )?; Ok(cost) @@ -77,7 +79,7 @@ impl GroveDb { key: &KeyInfo, max_element_size: u32, max_references_sizes: Vec, - in_parent_tree_using_sums: bool, + in_parent_tree_type: TreeType, grove_version: &GroveVersion, ) -> Result { check_grovedb_v0!( @@ -94,7 +96,7 @@ impl GroveDb { path, key, max_element_size, - in_parent_tree_using_sums, + in_parent_tree_type, max_references_sizes, grove_version, )?; diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index 57a59380..e362a761 100644 --- a/grovedb/src/operations/insert/mod.rs +++ 
b/grovedb/src/operations/insert/mod.rs @@ -317,7 +317,10 @@ impl GroveDb { ) ); } - Element::Tree(ref value, _) | Element::SumTree(ref value, ..) => { + Element::Tree(ref value, _) + | Element::SumTree(ref value, ..) + | Element::BigSumTree(ref value, ..) + | Element::CountTree(ref value, ..) => { if value.is_some() { return Err(Error::InvalidCodeExecution( "a tree should be empty at the moment of insertion when not using batches", @@ -450,7 +453,10 @@ impl GroveDb { ) ); } - Element::Tree(ref value, _) | Element::SumTree(ref value, ..) => { + Element::Tree(ref value, _) + | Element::SumTree(ref value, ..) + | Element::BigSumTree(ref value, ..) + | Element::CountTree(ref value, ..) => { if value.is_some() { return Err(Error::InvalidCodeExecution( "a tree should be empty at the moment of insertion when not using batches", @@ -1593,6 +1599,87 @@ mod tests { ); } + #[test] + fn test_one_insert_item_cost_under_count_tree() { + let grove_version = GroveVersion::latest(); + let db = make_empty_grovedb(); + let tx = db.start_transaction(); + + db.insert( + EMPTY_PATH, + b"tree", + Element::empty_count_tree(), + None, + Some(&tx), + grove_version, + ) + .unwrap() + .unwrap(); + + let cost = db + .insert( + [b"tree".as_slice()].as_ref(), + b"key1", + Element::new_item(b"test".to_vec()), + None, + Some(&tx), + grove_version, + ) + .cost_as_result() + .unwrap(); + + // Explanation for 152 storage_written_bytes + + // Key -> 37 bytes + // 32 bytes for the key prefix + // 4 bytes for the key + // 1 byte for key_size (required space for 36) + + // Value -> 81 + // 1 for the enum type item + // 1 for size of test bytes + // 4 for test bytes + // 1 for the flag option (but no flags) + // 32 for node hash + // 32 for value hash (trees have this for free) + // 9 for Count node + // 1 byte for the value_size (required space for 1) + + // Parent Hook -> 48 + // Key Bytes 4 + // Hash Size 32 + // Key Length 1 + // Count Merk 9 + // Child Heights 2 + + // Total 37 + 81 + 48 = 166 + + // 
Explanation for replaced bytes + + // Replaced parent Value -> 86 + // 1 for the flag option (but no flags) + // 1 for the enum type + // 1 for an empty option + // 1 for the count merk + // 9 for the count + // 32 for node hash + // 40 for the parent hook + // 2 byte for the value_size + assert_eq!( + cost, + OperationCost { + seek_count: 5, // todo: verify this + storage_cost: StorageCost { + added_bytes: 166, + replaced_bytes: 87, + removed_bytes: NoStorageRemoval + }, + storage_loaded_bytes: 162, // todo: verify this + hash_node_calls: 8, // todo: verify this + } + ); + } + #[test] fn test_one_insert_item_with_apple_flags_cost() { let grove_version = GroveVersion::latest(); diff --git a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index a007d219..4dec3abf 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -2,6 +2,7 @@ #[cfg(feature = "minimal")] use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; +use grovedb_merk::tree_type::TreeType; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_version::error::GroveVersionError; diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index d8bfc209..aff48637 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -313,7 +313,10 @@ impl GroveDb { } has_a_result_at_level |= true; } - Ok(Element::Tree(Some(_), _)) | Ok(Element::SumTree(Some(_), ..)) + Ok(Element::Tree(Some(_), _)) + | Ok(Element::SumTree(Some(_), ..)) + | Ok(Element::BigSumTree(Some(_), ..)) + | Ok(Element::CountTree(Some(_), ..)) if !done_with_results && query.has_subquery_or_matching_in_path_on_key(key) => { diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index 1ac09c8b..dca00bb9 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -308,7 
+308,11 @@ impl GroveDb { println!("lower layer had key {}", hex_to_ascii(key)); } match element { - Element::Tree(Some(_), _) | Element::SumTree(Some(_), ..) => { + Element::Tree(Some(_), _) + | Element::SumTree(Some(_), ..) + | Element::BigSumTree(Some(_), ..) + | Element::CountTree(Some(_), ..) + | Element::CountSumTree(Some(_), ..) => { path.push(key); let lower_hash = Self::verify_layer_proof( lower_layer, @@ -337,6 +341,9 @@ impl GroveDb { } Element::Tree(None, _) | Element::SumTree(None, ..) + | Element::BigSumTree(None, ..) + | Element::CountTree(None, ..) + | Element::CountSumTree(None, ..) | Element::SumItem(..) | Element::Item(..) | Element::Reference(..) => { diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 0996c324..000f97b2 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -2,7 +2,7 @@ mod state_sync_session; use std::pin::Pin; -use grovedb_merk::{tree::hash::CryptoHash, ChunkProducer}; +use grovedb_merk::{tree::hash::CryptoHash, tree_type::TreeType, ChunkProducer}; use grovedb_path::SubtreePath; use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; @@ -16,7 +16,7 @@ use crate::{Error, GroveDb, TransactionArg}; /// - `Option>`: The root key, which may be `None` if not present. /// - `bool`: Indicates whether the tree is a sum tree. /// - `Vec`: The chunk ID representing traversal instructions. 
-pub type ChunkIdentifier = (crate::SubtreePrefix, Option>, bool, Vec); +pub type ChunkIdentifier = (crate::SubtreePrefix, Option>, TreeType, Vec); pub const CURRENT_STATE_SYNC_VERSION: u16 = 1; @@ -82,7 +82,7 @@ impl GroveDb { } let root_app_hash = self.root_hash(transaction, grove_version).value?; - let (chunk_prefix, root_key, is_sum_tree, chunk_id) = + let (chunk_prefix, root_key, tree_type, chunk_id) = utils::decode_global_chunk_id(global_chunk_id, &root_app_hash)?; // TODO: Refactor this by writing fetch_chunk_inner (as only merk constructor @@ -92,7 +92,7 @@ impl GroveDb { .open_transactional_merk_by_prefix( chunk_prefix, root_key, - is_sum_tree, + tree_type, tx, None, grove_version, @@ -138,7 +138,7 @@ impl GroveDb { .open_non_transactional_merk_by_prefix( chunk_prefix, root_key, - is_sum_tree, + tree_type, None, grove_version, ) @@ -257,6 +257,7 @@ pub(crate) mod utils { use grovedb_merk::{ ed::Encode, proofs::{Decoder, Op}, + tree_type::TreeType, }; use crate::{replication::ChunkIdentifier, Error}; @@ -324,7 +325,7 @@ pub(crate) mod utils { if global_chunk_id == app_hash { let root_chunk_prefix_key: crate::SubtreePrefix = [0u8; 32]; - return Ok((root_chunk_prefix_key, None, false, vec![])); + return Ok((root_chunk_prefix_key, None, TreeType::NormalTree, vec![])); } let (chunk_prefix_key, remaining) = global_chunk_id.split_at(chunk_prefix_length); @@ -350,6 +351,8 @@ pub(crate) mod utils { } let (is_sum_tree, chunk_id) = remaining.split_at(is_sum_tree_length); + let tree_type = is_sum_tree[0].try_into()?; + let subtree_prefix: crate::SubtreePrefix = chunk_prefix_key .try_into() .map_err(|_| Error::CorruptedData("unable to construct subtree".to_string()))?; @@ -358,11 +361,11 @@ pub(crate) mod utils { Ok(( subtree_prefix, Some(root_key.to_vec()), - is_sum_tree[0] != 0, + tree_type, chunk_id.to_vec(), )) } else { - Ok((subtree_prefix, None, is_sum_tree[0] != 0, chunk_id.to_vec())) + Ok((subtree_prefix, None, tree_type, chunk_id.to_vec())) } } @@ -381,7 
+384,7 @@ pub(crate) mod utils { pub fn encode_global_chunk_id( subtree_prefix: [u8; blake3::OUT_LEN], root_key_opt: Option>, - is_sum_tree: bool, + tree_type: TreeType, chunk_id: Vec, ) -> Vec { let mut res = vec![]; @@ -395,11 +398,7 @@ pub(crate) mod utils { res.push(0u8); } - let mut is_sum_tree_v = 0u8; - if is_sum_tree { - is_sum_tree_v = 1u8; - } - res.push(is_sum_tree_v); + res.push(tree_type as u8); res.extend(chunk_id.to_vec()); diff --git a/grovedb/src/replication/state_sync_session.rs b/grovedb/src/replication/state_sync_session.rs index 1ce41c4b..a8831f07 100644 --- a/grovedb/src/replication/state_sync_session.rs +++ b/grovedb/src/replication/state_sync_session.rs @@ -7,6 +7,7 @@ use std::{ use grovedb_merk::{ tree::{kv::ValueDefinedCostType, value_hash}, + tree_type::TreeType, CryptoHash, Restorer, }; use grovedb_path::SubtreePath; @@ -37,8 +38,8 @@ struct SubtreeStateSyncInfo<'db> { /// Tree root key root_key: Option>, - /// Is Sum tree? - is_sum_tree: bool, + /// The type of tree + tree_type: TreeType, /// Path of current tree current_path: Vec>, @@ -130,7 +131,7 @@ impl<'tx> SubtreeStateSyncInfo<'tx> { SubtreeStateSyncInfo { restorer, root_key: None, - is_sum_tree: false, + tree_type: TreeType::NormalTree, pending_chunks: Default::default(), current_path: vec![], num_processed_chunks: 0, @@ -246,14 +247,14 @@ impl<'db> MultiStateSyncSession<'db> { &*(tx as *const _) }; - if let Ok((merk, root_key, is_sum_tree)) = + if let Ok((merk, root_key, tree_type)) = db.open_merk_for_replication(path.clone(), transaction_ref, grove_version) { let restorer = Restorer::new(merk, hash, actual_hash); let mut sync_info = SubtreeStateSyncInfo::new(restorer); sync_info.pending_chunks.insert(vec![]); sync_info.root_key = root_key.clone(); - sync_info.is_sum_tree = is_sum_tree; + sync_info.tree_type = tree_type; sync_info.current_path = path.to_vec(); self.as_mut() .current_prefixes() @@ -261,7 +262,7 @@ impl<'db> MultiStateSyncSession<'db> { Ok(encode_global_chunk_id( 
chunk_prefix, root_key, - is_sum_tree, + tree_type, vec![], )) } else { @@ -367,7 +368,7 @@ impl<'db> MultiStateSyncSession<'db> { next_chunk_ids.push(encode_global_chunk_id( chunk_prefix, subtree_state_sync.root_key.clone(), - subtree_state_sync.is_sum_tree, + subtree_state_sync.tree_type, local_chunk_id.clone(), )); } diff --git a/grovedb/src/tests/count_sum_tree_tests.rs b/grovedb/src/tests/count_sum_tree_tests.rs new file mode 100644 index 00000000..f171aee0 --- /dev/null +++ b/grovedb/src/tests/count_sum_tree_tests.rs @@ -0,0 +1,556 @@ +//! Count sum tree tests + +#[cfg(test)] +mod count_sum_tree_tests { + use grovedb_merk::{ + tree::{kv::ValueDefinedCostType, AggregateData}, + TreeFeatureType, + }; + use grovedb_storage::StorageBatch; + use grovedb_version::version::GroveVersion; + + use crate::{ + batch::QualifiedGroveDbOp, + tests::{make_test_grovedb, TEST_LEAF}, + Element, + }; + + #[test] + fn test_count_sum_tree_behaves_like_regular_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a CountSumTree + db.insert( + [TEST_LEAF].as_ref(), + b"count_sum_key", + Element::new_count_sum_tree(None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert CountSumTree"); + + // Fetch the CountSumTree + let count_sum_tree = db + .get([TEST_LEAF].as_ref(), b"count_sum_key", None, grove_version) + .unwrap() + .expect("should get CountSumTree"); + assert!(matches!(count_sum_tree, Element::CountSumTree(..))); + + // Insert items into the CountSumTree + db.insert( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item1", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item1"); + + db.insert( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item2", + Element::new_sum_item(3), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item2"); + + db.insert( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item3", + 
Element::new_sum_item(5), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item3"); + + // Test proper item retrieval + let item1 = db + .get( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item1", + None, + grove_version, + ) + .unwrap() + .expect("should get item1"); + assert_eq!(item1, Element::new_item(vec![1])); + + let item2 = db + .get( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item2", + None, + grove_version, + ) + .unwrap() + .expect("should get item2"); + assert_eq!(item2, Element::new_sum_item(3)); + + let item3 = db + .get( + [TEST_LEAF, b"count_sum_key"].as_ref(), + b"item3", + None, + grove_version, + ) + .unwrap() + .expect("should get item3"); + assert_eq!(item3, Element::new_sum_item(5)); + + // Test aggregate data (count and sum) + let batch = StorageBatch::new(); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_sum_key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open CountSumTree"); + + let aggregate_data = merk + .aggregate_data() + .expect("expected to get aggregate data"); + + // Assuming AggregateData::CountAndSum is implemented + assert_eq!(aggregate_data, AggregateData::CountAndSum(3, 8)); // 3 items: 1, 3, 5 + } + + #[test] + fn test_count_sum_tree_item_behaves_like_regular_item() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a CountSumTree with flags + db.insert( + [TEST_LEAF].as_ref(), + b"count_sum_key2", + Element::new_count_sum_tree_with_flags(None, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert CountSumTree with flags"); + + // Insert count and sum items + db.insert( + [TEST_LEAF, b"count_sum_key2"].as_ref(), + b"count_item", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert count_item"); + + db.insert( + [TEST_LEAF, b"count_sum_key2"].as_ref(), + b"sum_item", + Element::new_sum_item(4), + 
None, + None, + grove_version, + ) + .unwrap() + .expect("should insert sum_item"); + + // Test aggregate data + let batch = StorageBatch::new(); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_sum_key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open CountSumTree with flags"); + + let aggregate_data = merk + .aggregate_data() + .expect("expected to get aggregate data"); + + assert_eq!(aggregate_data, AggregateData::CountAndSum(2, 4)); + } + + #[test] + fn test_homogenous_node_type_in_count_sum_trees_and_regular_trees() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a CountSumTree with initial sum and count values + db.insert( + [TEST_LEAF].as_ref(), + b"count_sum_key3", + Element::new_count_sum_tree_with_flags_and_sum_and_count_value(None, 0, 0, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert CountSumTree with sum and count values"); + + // Add count and sum items + db.insert( + [TEST_LEAF, b"count_sum_key3"].as_ref(), + b"item1", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item1"); + + db.insert( + [TEST_LEAF, b"count_sum_key3"].as_ref(), + b"item2", + Element::new_sum_item(20), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item2"); + + // Add regular items + db.insert( + [TEST_LEAF, b"count_sum_key3"].as_ref(), + b"item3", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item3"); + + db.insert( + [TEST_LEAF, b"count_sum_key3"].as_ref(), + b"item4", + Element::new_item(vec![40]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item4"); + + // Open merk and check all elements in it + let batch = StorageBatch::new(); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_sum_key3"].as_ref().into(), + 
Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open CountSumTree"); + + // Verify feature types + let feature_type_item1 = merk + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type_item1, + TreeFeatureType::CountedSummedMerkNode(1, 0) + ); + + let feature_type_item2 = merk + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type_item2, + TreeFeatureType::CountedSummedMerkNode(1, 20) + ); + + let feature_type_item3 = merk + .get_feature_type( + b"item3", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type_item3, + TreeFeatureType::CountedSummedMerkNode(1, 0) + ); + + let feature_type_item4 = merk + .get_feature_type( + b"item4", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type_item4, + TreeFeatureType::CountedSummedMerkNode(1, 0) + ); + + // Verify aggregate data + let aggregate_data = merk + .aggregate_data() + .expect("expected to get aggregate data"); + assert_eq!(aggregate_data, AggregateData::CountAndSum(4, 20)); // 2 count, 10 + 20 sum + } + + #[test] + fn test_count_sum_tree_feature() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a regular tree + db.insert( + [TEST_LEAF].as_ref(), + b"regular_key", + Element::new_tree(None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert regular tree"); + + let batch = StorageBatch::new(); + + // Aggregate data should 
be None for regular tree + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"regular_key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open regular tree"); + assert_eq!( + merk.aggregate_data() + .expect("expected to get aggregate data"), + AggregateData::NoAggregateData + ); + + // Insert a CountSumTree + db.insert( + [TEST_LEAF].as_ref(), + b"count_sum_key4", + Element::new_count_sum_tree_with_flags_and_sum_and_count_value(None, 0, 0, None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert CountSumTree"); + + let count_sum_tree = db + .get([TEST_LEAF].as_ref(), b"count_sum_key4", None, grove_version) + .unwrap() + .expect("should retrieve CountSumTree"); + assert!(matches!(count_sum_tree, Element::CountSumTree(..))); + // Note: Directly accessing count_sum_value_or_default is not shown in original + // code. Assuming you have a method like this to extract count and sum + // from the Element. If not, rely on aggregate_data as below. 
+ + // Add count and sum items + db.insert( + [TEST_LEAF, b"count_sum_key4"].as_ref(), + b"count_item1", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert count_item1"); + + db.insert( + [TEST_LEAF, b"count_sum_key4"].as_ref(), + b"sum_item1", + Element::new_sum_item(5), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert sum_item1"); + + // Verify aggregate data + let batch = StorageBatch::new(); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_sum_key4"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open CountSumTree"); + + let aggregate_data = merk + .aggregate_data() + .expect("expected to get aggregate data"); + assert_eq!(aggregate_data, AggregateData::CountAndSum(2, 5)); + } + + #[test] + fn test_count_sum_tree_with_batches() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Prepare a batch of operations + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec()], + b"count_sum_key6".to_vec(), + Element::new_count_sum_tree(None), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"count_sum_key6".to_vec()], + b"a".to_vec(), + Element::new_item(vec![10]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"count_sum_key6".to_vec()], + b"b".to_vec(), + Element::new_sum_item(20), + ), + ]; + + // Apply the batch + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + // Open the CountSumTree and verify aggregate data + let batch = StorageBatch::new(); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_sum_key6"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open CountSumTree"); + + let aggregate_data = merk + .aggregate_data() + .expect("expected to get aggregate data"); + 
assert_eq!(aggregate_data, AggregateData::CountAndSum(2, 20)); + } + + #[test] + fn test_count_sum_tree_propagation() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + + // Insert a parent CountSumTree + db.insert( + [TEST_LEAF].as_ref(), + b"parent_count_sum", + Element::new_count_sum_tree(None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert parent CountSumTree"); + + // Insert a child CountSumTree within the parent + db.insert( + [TEST_LEAF, b"parent_count_sum"].as_ref(), + b"child_count_sum", + Element::new_count_sum_tree(None), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert child CountSumTree"); + + // Insert items into the child CountSumTree + db.insert( + [TEST_LEAF, b"parent_count_sum", b"child_count_sum"].as_ref(), + b"item1", + Element::new_item(vec![5]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item1 into child"); + + db.insert( + [TEST_LEAF, b"parent_count_sum", b"child_count_sum"].as_ref(), + b"item2", + Element::new_sum_item(15), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item2 into child"); + + // Verify aggregate data of child + let batch = StorageBatch::new(); + let child_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"parent_count_sum", b"child_count_sum"] + .as_ref() + .into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open child CountSumTree"); + + let child_aggregate = child_merk + .aggregate_data() + .expect("expected to get aggregate data"); + assert_eq!(child_aggregate, AggregateData::CountAndSum(2, 15)); + + // Verify aggregate data of parent + let parent_batch = StorageBatch::new(); + let parent_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"parent_count_sum"].as_ref().into(), + Some(&parent_batch), + grove_version, + ) + .unwrap() + .expect("should open parent CountSumTree"); + + let parent_aggregate = parent_merk + 
.aggregate_data() + .expect("expected to get aggregate data"); + assert_eq!(parent_aggregate, AggregateData::CountAndSum(2, 15)); + } +} diff --git a/grovedb/src/tests/count_tree_tests.rs b/grovedb/src/tests/count_tree_tests.rs new file mode 100644 index 00000000..e4dffc06 --- /dev/null +++ b/grovedb/src/tests/count_tree_tests.rs @@ -0,0 +1,852 @@ +//! Count tree tests + +#[cfg(test)] +mod tests { + use assert_matches::assert_matches; + use grovedb_merk::{ + proofs::Query, + tree::{kv::ValueDefinedCostType, AggregateData}, + TreeFeatureType::{BasicMerkNode, CountedMerkNode}, + }; + use grovedb_storage::StorageBatch; + use grovedb_version::version::GroveVersion; + + use crate::{ + batch::QualifiedGroveDbOp, + reference_path::ReferencePathType, + tests::{make_test_grovedb, TEST_LEAF}, + Element, GroveDb, PathQuery, + }; + + #[test] + fn test_count_tree_behaves_like_regular_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_count_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + + // Can fetch count tree + let count_tree = db + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .expect("should get count tree"); + assert!(matches!(count_tree, Element::CountTree(..))); + + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey2", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey3", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + // Test proper item retrieval + let item = db + .get( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey", + None, + 
grove_version, + ) + .unwrap() + .expect("should get item"); + assert_eq!(item, Element::new_item(vec![1])); + + // Test proof generation + let mut query = Query::new(); + query.insert_key(b"innerkey2".to_vec()); + + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("should generate proof"); + let (root_hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 1); + assert_eq!( + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), + Element::new_item(vec![3]) + ); + } + + #[test] + fn test_homogenous_node_type_in_count_trees_and_regular_trees() { + let grove_version = GroveVersion::latest(); + // All elements in a count tree must have a count feature type + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_count_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + // Add count items + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item1", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item2", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + // Add regular items + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item3", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item4", + Element::new_item(vec![15]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let batch = StorageBatch::new(); + + // 
Open merk and check all elements in it + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let feature_type_node_1 = merk + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + let feature_type_node_2 = merk + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + let feature_type_node_3 = merk + .get_feature_type( + b"item3", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + let feature_type_node_4 = merk + .get_feature_type( + b"item4", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + + assert_eq!(feature_type_node_1, CountedMerkNode(1)); + assert_eq!(feature_type_node_2, CountedMerkNode(1)); + assert_eq!(feature_type_node_3, CountedMerkNode(1)); + assert_eq!(feature_type_node_4, CountedMerkNode(1)); + + // Perform the same test on regular trees + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item1", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item2", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, 
b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + merk.get_feature_type( + b"item1", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) + )); + assert!(matches!( + merk.get_feature_type( + b"item2", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) + )); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::NoAggregateData + ); + } + + #[test] + fn test_count_tree_feature() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + + let batch = StorageBatch::new(); + + // Sum should be non for non count tree + // TODO: change interface to retrieve element directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::NoAggregateData + ); + + // Add count tree + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_count_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert count tree"); + let count_tree = db + .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) + .unwrap() + .expect("should retrieve tree"); + assert_eq!(count_tree.count_value_or_default(), 0); + + // Add count items to the count tree + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item1", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + // TODO: change interface to retrieve element 
directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::Count(1) + ); + + // Add more count items + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::Count(3) + ); + + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + Element::new_item(vec![29]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::Count(4) + ); + + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::Count(4) 
+ ); + + db.delete( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to delete"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get count"), + AggregateData::Count(3) + ); + } + + #[test] + fn test_count_tree_propagation() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + // Tree + // count_key: CountTree + // / \ + // countitem3 tree2: CountTree + // + // tree2 : CountTree + // / + // item1 item2 item3 ref1 + db.insert( + [TEST_LEAF].as_ref(), + b"count_key", + Element::empty_count_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"count_key"].as_ref(), + b"tree2", + Element::empty_count_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"count_key"].as_ref(), + b"countitem3", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"count_key", b"tree2"].as_ref(), + b"item1", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"count_key", b"tree2"].as_ref(), + b"item2", + Element::new_item(vec![5]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"count_key", b"tree2"].as_ref(), + b"item3", + Element::new_item(vec![10]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"count_key", b"tree2"].as_ref(), + b"ref1", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"count_key".to_vec(), + b"tree2".to_vec(), 
+ b"item1".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let count_tree = db + .get([TEST_LEAF].as_ref(), b"count_key", None, grove_version) + .unwrap() + .expect("should fetch tree"); + assert_eq!(count_tree.count_value_or_default(), 5); + + let batch = StorageBatch::new(); + + // Assert node feature types + let test_leaf_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let root_tree_feature_type = test_leaf_merk + .get_feature_type( + b"count_key", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + + assert_matches!(root_tree_feature_type, BasicMerkNode); + + let parent_count_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let count_tree_feature_type = parent_count_tree + .get_feature_type( + b"tree2", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + assert_matches!(count_tree_feature_type, CountedMerkNode(4)); + + let child_count_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"count_key", b"tree2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let count_tree_feature_type = child_count_tree + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + + assert_matches!(count_tree_feature_type, CountedMerkNode(1)); + + let count_tree_feature_type = child_count_tree + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + 
grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + + assert_matches!(count_tree_feature_type, CountedMerkNode(1)); + + let count_tree_feature_type = child_count_tree + .get_feature_type( + b"item3", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + + assert_matches!(count_tree_feature_type, CountedMerkNode(1)); + + let count_tree_feature_type = child_count_tree + .get_feature_type( + b"ref1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("tree feature type"); + + assert_matches!(count_tree_feature_type, CountedMerkNode(1)); + } + + #[test] + fn test_count_tree_with_batches() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec()], + b"key1".to_vec(), + Element::empty_count_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"a".to_vec(), + Element::new_item(vec![214]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"b".to_vec(), + Element::new_item(vec![10]), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let count_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + + let tree_feature_type_a = count_tree + .get_feature_type( + b"a", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected tree feature type"); + + let tree_feature_type_b = count_tree + .get_feature_type( + b"a", + true, + 
Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected tree feature type"); + + assert_matches!(tree_feature_type_a, CountedMerkNode(1)); + assert_matches!(tree_feature_type_b, CountedMerkNode(1)); + + // Create new batch to use existing tree + let ops = vec![QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"c".to_vec(), + Element::new_item(vec![10]), + )]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let count_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let tree_feature_type_c = count_tree + .get_feature_type( + b"c", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected tree feature type"); + assert_matches!(tree_feature_type_c, CountedMerkNode(1)); + assert_eq!( + count_tree.aggregate_data().expect("expected to get count"), + AggregateData::Count(3) + ); + + // Test propagation + // Add a new count tree with its own count items, should affect count of + // original tree + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"d".to_vec(), + Element::empty_count_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"first".to_vec(), + Element::new_item(vec![2]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"e".to_vec(), + Element::empty_count_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + 
vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"first".to_vec(), + Element::new_item(vec![3]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"third".to_vec(), + Element::empty_count_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"a".to_vec(), + Element::new_item(vec![5]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"b".to_vec(), + Element::new_item(vec![5]), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let count_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + count_tree.aggregate_data().expect("expected to get count"), + AggregateData::Count(9) + ); + } +} diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index 41263669..a7f01eb7 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -6,6 +6,8 @@ mod query_tests; mod sum_tree_tests; +mod count_sum_tree_tests; +mod count_tree_tests; mod tree_hashes_tests; use std::{ @@ -3213,14 +3215,14 @@ mod tests { let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); let root_merk = Merk::open_base( storage, - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) .unwrap() .expect("expected to get root merk"); let (_, root_key, _) = root_merk - .root_hash_key_and_sum() + .root_hash_key_and_aggregate_data() .unwrap() .expect("expected to get root hash, key and 
sum"); assert!(root_key.is_some()) @@ -3318,7 +3320,7 @@ mod tests { let subtree = Merk::open_layered_with_root_key( subtree_storage, Some(b"key3".to_vec()), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -3367,7 +3369,7 @@ mod tests { let subtree = Merk::open_layered_with_root_key( subtree_storage, Some(b"key4".to_vec()), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) @@ -3387,7 +3389,7 @@ mod tests { let subtree = Merk::open_layered_with_root_key( subtree_storage, Some(b"key3".to_vec()), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), grove_version, ) diff --git a/grovedb/src/tests/sum_tree_tests.rs b/grovedb/src/tests/sum_tree_tests.rs index b255f653..777fcb45 100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -1,730 +1,285 @@ //! Sum tree tests -use grovedb_merk::{ - proofs::Query, - tree::kv::ValueDefinedCostType, - TreeFeatureType::{BasicMerkNode, SummedMerkNode}, -}; -use grovedb_storage::StorageBatch; -use grovedb_version::version::GroveVersion; - -use crate::{ - batch::QualifiedGroveDbOp, - reference_path::ReferencePathType, - tests::{make_test_grovedb, TEST_LEAF}, - Element, Error, GroveDb, PathQuery, -}; - -#[test] -fn test_sum_tree_behaves_like_regular_tree() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - - // Can fetch sum tree - let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key", None, grove_version) - .unwrap() - .expect("should get tree"); - assert!(matches!(sum_tree, Element::SumTree(..))); - - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"innerkey", - Element::new_item(vec![1]), - None, - None, - grove_version, - ) - .unwrap() - 
.expect("should insert item"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"innerkey2", - Element::new_item(vec![3]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"innerkey3", - Element::empty_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - - // Test proper item retrieval - let item = db - .get( +#[cfg(test)] +mod tests { + use grovedb_merk::{ + proofs::Query, + tree::{kv::ValueDefinedCostType, AggregateData}, + TreeFeatureType::{BasicMerkNode, BigSummedMerkNode, SummedMerkNode}, + }; + use grovedb_storage::StorageBatch; + use grovedb_version::version::GroveVersion; + + use crate::{ + batch::QualifiedGroveDbOp, + element::SumValue, + reference_path::ReferencePathType, + tests::{make_test_grovedb, TEST_LEAF}, + Element, Error, GroveDb, PathQuery, + }; + + #[test] + fn test_sum_tree_behaves_like_regular_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + + // Can fetch sum tree + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .expect("should get tree"); + assert!(matches!(sum_tree, Element::SumTree(..))); + + db.insert( [TEST_LEAF, b"key"].as_ref(), b"innerkey", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey2", + Element::new_item(vec![3]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey3", + Element::empty_tree(), + None, None, grove_version, ) .unwrap() - .expect("should get item"); - assert_eq!(item, Element::new_item(vec![1])); + .expect("should insert item"); - // Test proof 
generation - let mut query = Query::new(); - query.insert_key(b"innerkey2".to_vec()); + // Test proper item retrieval + let item = db + .get( + [TEST_LEAF, b"key"].as_ref(), + b"innerkey", + None, + grove_version, + ) + .unwrap() + .expect("should get item"); + assert_eq!(item, Element::new_item(vec![1])); - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); - let proof = db - .prove_query(&path_query, None, grove_version) - .unwrap() - .expect("should generate proof"); - let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); - assert_eq!( - root_hash, - db.grove_db.root_hash(None, grove_version).unwrap().unwrap() - ); - assert_eq!(result_set.len(), 1); - assert_eq!( - Element::deserialize(&result_set[0].value, grove_version) - .expect("should deserialize element"), - Element::new_item(vec![3]) - ); -} + // Test proof generation + let mut query = Query::new(); + query.insert_key(b"innerkey2".to_vec()); -#[test] -fn test_sum_item_behaves_like_regular_item() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"sumkey", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"sumkey"].as_ref(), - b"k1", - Element::new_item(vec![1]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"sumkey"].as_ref(), - b"k2", - Element::new_sum_item(5), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"sumkey"].as_ref(), - b"k3", - Element::empty_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - - // Test proper item retrieval - let item = db - .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None, grove_version) - .unwrap() - .expect("should get item"); - 
assert_eq!(item, Element::new_sum_item(5)); - - // Test proof generation - let mut query = Query::new(); - query.insert_key(b"k2".to_vec()); - - let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"sumkey".to_vec()], query); - let proof = db - .prove_query(&path_query, None, grove_version) - .unwrap() - .expect("should generate proof"); - let (root_hash, result_set) = - GroveDb::verify_query_raw(&proof, &path_query, grove_version).expect("should verify proof"); - assert_eq!( - root_hash, - db.grove_db.root_hash(None, grove_version).unwrap().unwrap() - ); - assert_eq!(result_set.len(), 1); - let element_from_proof = Element::deserialize(&result_set[0].value, grove_version) - .expect("should deserialize element"); - assert_eq!(element_from_proof, Element::new_sum_item(5)); - assert_eq!(element_from_proof.sum_value_or_default(), 5); -} + let path_query = PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"key".to_vec()], query); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("should generate proof"); + let (root_hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 1); + assert_eq!( + Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"), + Element::new_item(vec![3]) + ); + } -#[test] -fn test_cannot_insert_sum_item_in_regular_tree() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"sumkey", - Element::empty_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - assert!(matches!( + #[test] + fn test_sum_item_behaves_like_regular_item() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + 
b"sumkey", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); db.insert( [TEST_LEAF, b"sumkey"].as_ref(), b"k1", + Element::new_item(vec![1]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"sumkey"].as_ref(), + b"k2", Element::new_sum_item(5), None, None, - grove_version + grove_version, ) - .unwrap(), - Err(Error::InvalidInput("cannot add sum item to non sum tree")) - )); -} - -#[test] -fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { - let grove_version = GroveVersion::latest(); - // All elements in a sum tree must have a summed feature type - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - // Add sum items - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item1", - Element::new_sum_item(30), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item2", - Element::new_sum_item(10), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - // Add regular items - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item3", - Element::new_item(vec![10]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item4", - Element::new_item(vec![15]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - - let batch = StorageBatch::new(); - - // Open merk and check all elements in it - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert!(matches!( - merk.get_feature_type( - b"item1", - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version + 
.unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"sumkey"].as_ref(), + b"k3", + Element::empty_tree(), + None, + None, + grove_version, ) .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(30)) - )); - assert!(matches!( - merk.get_feature_type( - b"item2", - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version + .expect("should insert tree"); + + // Test proper item retrieval + let item = db + .get([TEST_LEAF, b"sumkey"].as_ref(), b"k2", None, grove_version) + .unwrap() + .expect("should get item"); + assert_eq!(item, Element::new_sum_item(5)); + + // Test proof generation + let mut query = Query::new(); + query.insert_key(b"k2".to_vec()); + + let path_query = + PathQuery::new_unsized(vec![TEST_LEAF.to_vec(), b"sumkey".to_vec()], query); + let proof = db + .prove_query(&path_query, None, grove_version) + .unwrap() + .expect("should generate proof"); + let (root_hash, result_set) = GroveDb::verify_query_raw(&proof, &path_query, grove_version) + .expect("should verify proof"); + assert_eq!( + root_hash, + db.grove_db.root_hash(None, grove_version).unwrap().unwrap() + ); + assert_eq!(result_set.len(), 1); + let element_from_proof = Element::deserialize(&result_set[0].value, grove_version) + .expect("should deserialize element"); + assert_eq!(element_from_proof, Element::new_sum_item(5)); + assert_eq!(element_from_proof.sum_value_or_default(), 5); + } + + #[test] + fn test_cannot_insert_sum_item_in_regular_tree() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"sumkey", + Element::empty_tree(), + None, + None, + grove_version, ) .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(10)) - )); - assert!(matches!( - merk.get_feature_type( - b"item3", - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version + .expect("should insert tree"); + assert!(matches!( + db.insert( + [TEST_LEAF, b"sumkey"].as_ref(), + b"k1", 
+ Element::new_sum_item(5), + None, + None, + grove_version + ) + .unwrap(), + Err(Error::InvalidInput("cannot add sum item to non sum tree")) + )); + } + + #[test] + fn test_homogenous_node_type_in_sum_trees_and_regular_trees() { + let grove_version = GroveVersion::latest(); + // All elements in a sum tree must have a summed feature type + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_sum_tree(), + None, + None, + grove_version, ) .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(0)) - )); - assert!(matches!( - merk.get_feature_type( - b"item4", - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version - ) - .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(0)) - )); - assert_eq!(merk.sum().expect("expected to get sum"), Some(40)); - - // Perform the same test on regular trees - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::empty_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item1", - Element::new_item(vec![30]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"item2", - Element::new_item(vec![10]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert!(matches!( - merk.get_feature_type( + .expect("should insert tree"); + // Add sum items + db.insert( + [TEST_LEAF, b"key"].as_ref(), b"item1", - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version + Element::new_sum_item(30), + None, + None, + grove_version, ) .unwrap() - .expect("node should exist"), - Some(BasicMerkNode) - )); - assert!(matches!( - 
merk.get_feature_type( + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), b"item2", - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version + Element::new_sum_item(10), + None, + None, + grove_version, ) .unwrap() - .expect("node should exist"), - Some(BasicMerkNode) - )); - assert_eq!(merk.sum().expect("expected to get sum"), None); -} - -#[test] -fn test_sum_tree_feature() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::empty_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - - let batch = StorageBatch::new(); - - // Sum should be non for non sum tree - // TODO: change interface to retrieve element directly - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), None); - - // Add sum tree - db.insert( - [TEST_LEAF].as_ref(), - b"key2", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert sum tree"); - let sum_tree = db - .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) - .unwrap() - .expect("should retrieve tree"); - assert_eq!(sum_tree.sum_value_or_default(), 0); - - // Add sum items to the sum tree - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item1", - Element::new_sum_item(30), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - // TODO: change interface to retrieve element directly - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key2"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), Some(30)); - - // Add more sum items - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item2", 
- Element::new_sum_item(-10), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item3", - Element::new_sum_item(50), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key2"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); // 30 - 10 + 50 = 70 - - // Add non sum items, result should remain the same - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item4", - Element::new_item(vec![29]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key2"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), Some(70)); - - // Update existing sum items - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item2", - Element::new_sum_item(10), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item3", - Element::new_sum_item(-100), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key2"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), Some(-60)); // 30 + 10 - 100 = -60 - - // We can not replace a normal item with a sum item, so let's delete it first - db.delete( - [TEST_LEAF, b"key2"].as_ref(), - b"item4", - None, - None, - grove_version, - ) - .unwrap() - .expect("expected to delete"); - // Use a large value - db.insert( - [TEST_LEAF, b"key2"].as_ref(), - b"item4", - Element::new_sum_item(10000000), - None, - 
None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - let merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key2"].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert_eq!(merk.sum().expect("expected to get sum"), Some(9999940)); // 30 + - // 10 - - // 100 + - // 10000000 - - // TODO: Test out overflows -} - -#[test] -fn test_sum_tree_propagation() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - // Tree - // SumTree - // SumTree - // Item1 - // SumItem1 - // SumItem2 - // SumItem3 - db.insert( - [TEST_LEAF].as_ref(), - b"key", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"tree2", - Element::empty_sum_tree(), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"key"].as_ref(), - b"sumitem3", - Element::new_sum_item(20), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert tree"); - db.insert( - [TEST_LEAF, b"key", b"tree2"].as_ref(), - b"item1", - Element::new_item(vec![2]), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key", b"tree2"].as_ref(), - b"sumitem1", - Element::new_sum_item(5), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key", b"tree2"].as_ref(), - b"sumitem2", - Element::new_sum_item(10), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - db.insert( - [TEST_LEAF, b"key", b"tree2"].as_ref(), - b"item2", - Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ - TEST_LEAF.to_vec(), - b"key".to_vec(), - b"tree2".to_vec(), - b"sumitem1".to_vec(), - ])), - None, - None, - grove_version, - ) - .unwrap() - .expect("should insert item"); - - let sum_tree = db - 
.get([TEST_LEAF].as_ref(), b"key", None, grove_version) - .unwrap() - .expect("should fetch tree"); - assert_eq!(sum_tree.sum_value_or_default(), 35); - - let batch = StorageBatch::new(); - - // Assert node feature types - let test_leaf_merk = db - .open_non_transactional_merk_at_path( - [TEST_LEAF].as_ref().into(), - Some(&batch), - grove_version, - ) - .unwrap() - .expect("should open tree"); - assert!(matches!( - test_leaf_merk - .get_feature_type( - b"key", - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version - ) - .unwrap() - .expect("node should exist"), - Some(BasicMerkNode) - )); - - let parent_sum_tree = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key"].as_ref().into(), - Some(&batch), + .expect("should insert item"); + // Add regular items + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item3", + Element::new_item(vec![10]), + None, + None, grove_version, ) .unwrap() - .expect("should open tree"); - assert!(matches!( - parent_sum_tree - .get_feature_type( - b"tree2", - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version - ) - .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(15)) /* 15 because the child sum tree has one sum item of - * value 5 and - * another of value 10 */ - )); - - let child_sum_tree = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key", b"tree2"].as_ref().into(), - Some(&batch), + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item4", + Element::new_item(vec![15]), + None, + None, grove_version, ) .unwrap() - .expect("should open tree"); - assert!(matches!( - child_sum_tree - .get_feature_type( + .expect("should insert item"); + + let batch = StorageBatch::new(); + + // Open merk and check all elements in it + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + 
merk.get_feature_type( b"item1", true, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -732,213 +287,1383 @@ fn test_sum_tree_propagation() { ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(0)) - )); - assert!(matches!( - child_sum_tree - .get_feature_type( - b"sumitem1", + Some(SummedMerkNode(30)) + )); + assert!(matches!( + merk.get_feature_type( + b"item2", true, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(5)) - )); - assert!(matches!( - child_sum_tree - .get_feature_type( - b"sumitem2", + Some(SummedMerkNode(10)) + )); + assert!(matches!( + merk.get_feature_type( + b"item3", true, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(10)) - )); - - // TODO: should references take the sum of the referenced element?? - assert!(matches!( - child_sum_tree - .get_feature_type( - b"item2", + Some(SummedMerkNode(0)) + )); + assert!(matches!( + merk.get_feature_type( + b"item4", true, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(0)) - )); -} + Some(SummedMerkNode(0)) + )); + assert_eq!( + merk.aggregate_data() + .expect("expected to get sum") + .as_sum_i64(), + 40 + ); -#[test] -fn test_sum_tree_with_batches() { - let grove_version = GroveVersion::latest(); - let db = make_test_grovedb(grove_version); - let ops = vec![ - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec()], - b"key1".to_vec(), - Element::empty_sum_tree(), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - b"a".to_vec(), - Element::new_item(vec![214]), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - b"b".to_vec(), - Element::new_sum_item(10), - ), - ]; - db.apply_batch(ops, None, None, grove_version) + // Perform the same test on regular trees + let db = 
make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("should apply batch"); - - let batch = StorageBatch::new(); - let sum_tree = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key1"].as_ref().into(), - Some(&batch), + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item1", + Element::new_item(vec![30]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"item2", + Element::new_item(vec![10]), + None, + None, grove_version, ) .unwrap() - .expect("should open tree"); + .expect("should insert item"); - assert!(matches!( - sum_tree - .get_feature_type( - b"a", + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + merk.get_feature_type( + b"item1", true, Some(&Element::value_defined_cost_for_serialized_value), grove_version ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(0)) - )); - assert!(matches!( - sum_tree - .get_feature_type( - b"b", + Some(BasicMerkNode) + )); + assert!(matches!( + merk.get_feature_type( + b"item2", true, Some(&Element::value_defined_cost_for_serialized_value), grove_version ) .unwrap() .expect("node should exist"), - Some(SummedMerkNode(10)) - )); + Some(BasicMerkNode) + )); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::NoAggregateData + ); + } - // Create new batch to use existing tree - let ops = vec![QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - b"c".to_vec(), - Element::new_sum_item(10), - )]; - db.apply_batch(ops, None, None, grove_version) + #[test] + fn test_sum_tree_feature() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + 
db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_tree(), + None, + None, + grove_version, + ) .unwrap() - .expect("should apply batch"); + .expect("should insert tree"); + + let batch = StorageBatch::new(); + + // Sum should be non for non sum tree + // TODO: change interface to retrieve element directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::NoAggregateData + ); - let batch = StorageBatch::new(); - let sum_tree = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key1"].as_ref().into(), - Some(&batch), + // Add sum tree + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_sum_tree(), + None, + None, grove_version, ) .unwrap() - .expect("should open tree"); - assert!(matches!( - sum_tree - .get_feature_type( - b"c", - true, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version + .expect("should insert sum tree"); + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) + .unwrap() + .expect("should retrieve tree"); + assert_eq!(sum_tree.sum_value_or_default(), 0); + + // Add sum items to the sum tree + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item1", + Element::new_sum_item(30), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + // TODO: change interface to retrieve element directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, ) .unwrap() - .expect("node should exist"), - Some(SummedMerkNode(10)) - )); - assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(20)); - - // Test propagation - // Add a new sum tree with its own sum items, should affect sum of original - // tree - let ops = vec![ - QualifiedGroveDbOp::insert_or_replace_op( - 
vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - b"d".to_vec(), - Element::empty_sum_tree(), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], - b"first".to_vec(), - Element::new_sum_item(4), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], - b"second".to_vec(), - Element::new_item(vec![4]), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec()], - b"e".to_vec(), - Element::empty_sum_tree(), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], - b"first".to_vec(), - Element::new_sum_item(12), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], - b"second".to_vec(), - Element::new_item(vec![4]), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], - b"third".to_vec(), - Element::empty_sum_tree(), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - b"e".to_vec(), - b"third".to_vec(), - ], - b"a".to_vec(), - Element::new_sum_item(5), - ), - QualifiedGroveDbOp::insert_or_replace_op( - vec![ - TEST_LEAF.to_vec(), - b"key1".to_vec(), - b"e".to_vec(), - b"third".to_vec(), - ], - b"b".to_vec(), - Element::new_item(vec![5]), - ), - ]; - db.apply_batch(ops, None, None, grove_version) + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(30) + ); + + // Add more sum items + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_sum_item(-10), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_sum_item(50), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + 
.open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(70) + ); // 30 - 10 + 50 = 70 + + // Add non sum items, result should remain the same + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + Element::new_item(vec![29]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(70) + ); + + // Update existing sum items + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_sum_item(10), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_sum_item(-100), + None, + None, + grove_version, + ) .unwrap() - .expect("should apply batch"); + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(-60) + ); // 30 + 10 - 100 = -60 - let batch = StorageBatch::new(); - let sum_tree = db - .open_non_transactional_merk_at_path( - [TEST_LEAF, b"key1"].as_ref().into(), - Some(&batch), + // We can not replace a normal item with a sum item, so let's delete it first + db.delete( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + None, + None, + grove_version, + ) + .unwrap() + .expect("expected to delete"); + // Use a large value + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + Element::new_sum_item(10000000), + None, + None, 
grove_version, ) .unwrap() - .expect("should open tree"); - assert_eq!(sum_tree.sum().expect("expected to get sum"), Some(41)); + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(9999940) + ); // 30 + + // 10 - + // 100 + + // 10000000 + } + + #[test] + fn test_sum_tree_overflow() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + db.insert( + [TEST_LEAF].as_ref(), + b"key", + Element::empty_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + + let batch = StorageBatch::new(); + + // Sum should be non for non sum tree + // TODO: change interface to retrieve element directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::NoAggregateData + ); + + // Add sum tree + db.insert( + [TEST_LEAF].as_ref(), + b"key2", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert sum tree"); + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"key2", None, grove_version) + .unwrap() + .expect("should retrieve tree"); + assert_eq!(sum_tree.sum_value_or_default(), 0); + + // Add sum items to the sum tree + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item1", + Element::new_sum_item(SumValue::MAX), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + // TODO: change interface to retrieve element directly + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open 
tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(SumValue::MAX) + ); + + // Subtract 10 from Max should work + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_sum_item(-10), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(SumValue::MAX - 10) + ); + + // Add 20 from Max should overflow + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_sum_item(20), + None, + None, + grove_version, + ) + .unwrap() + .expect_err("should not be able to insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(SumValue::MAX - 10) + ); + + // Add non sum items, result should remain the same + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item4", + Element::new_item(vec![29]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(SumValue::MAX - 10) + ); + + // Update existing sum item will overflow + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_sum_item(10), // we are replacing -10 with 10 + None, + None, + grove_version, + ) + .unwrap() + .expect_err("should not be able to insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, 
b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(SumValue::MAX - 10) + ); + + // Update existing sum item will overflow + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item2", + Element::new_sum_item(SumValue::MIN), // we are replacing -10 with SumValue::MIN + None, + None, + grove_version, + ) + .unwrap() + .expect("should be able to insert item"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(-1) + ); + + db.insert( + [TEST_LEAF, b"key2"].as_ref(), + b"item3", + Element::new_sum_item(-40), + None, + None, + grove_version, + ) + .unwrap() + .expect("should be able to insert item"); + + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(-41) + ); + + // Deleting item1 should make us overflow + db.delete( + [TEST_LEAF, b"key2"].as_ref(), + b"item1", + None, + None, + grove_version, + ) + .unwrap() + .expect_err("expected not be able to delete"); + let merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + merk.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(-41) + ); + } + + #[test] + fn test_sum_tree_propagation() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + // Tree + // SumTree + // SumTree + // Item1 + // SumItem1 + // SumItem2 + // SumItem3 + db.insert( + [TEST_LEAF].as_ref(), + b"key", + 
Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"tree2", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"key"].as_ref(), + b"sumitem3", + Element::new_sum_item(20), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"key", b"tree2"].as_ref(), + b"item1", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key", b"tree2"].as_ref(), + b"sumitem1", + Element::new_sum_item(5), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key", b"tree2"].as_ref(), + b"sumitem2", + Element::new_sum_item(10), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"key", b"tree2"].as_ref(), + b"item2", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"key".to_vec(), + b"tree2".to_vec(), + b"sumitem1".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"key", None, grove_version) + .unwrap() + .expect("should fetch tree"); + assert_eq!(sum_tree.sum_value_or_default(), 35); + + let batch = StorageBatch::new(); + + // Assert node feature types + let test_leaf_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + test_leaf_merk + .get_feature_type( + b"key", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) + )); + + let parent_sum_tree = db + 
.open_non_transactional_merk_at_path( + [TEST_LEAF, b"key"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + parent_sum_tree + .get_feature_type( + b"tree2", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(15)) /* 15 because the child sum tree has one sum item of + * value 5 and + * another of value 10 */ + )); + + let child_sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key", b"tree2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + child_sum_tree + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) + )); + assert!(matches!( + child_sum_tree + .get_feature_type( + b"sumitem1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(5)) + )); + assert!(matches!( + child_sum_tree + .get_feature_type( + b"sumitem2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(10)) + )); + + // TODO: should references take the sum of the referenced element?? 
+ assert!(matches!( + child_sum_tree + .get_feature_type( + b"item2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) + )); + } + + #[test] + fn test_big_sum_tree_propagation() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + // Tree + // BigSumTree + // SumTree1 + // SumItem1 + // SumItem2 + // SumTree2 + // SumItem3 + // SumItem4 + db.insert( + [TEST_LEAF].as_ref(), + b"big_sum_tree", + Element::empty_big_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"big_sum_tree"].as_ref(), + b"sum_tree_1", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"big_sum_tree"].as_ref(), + b"sum_tree_2", + Element::empty_sum_tree(), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert tree"); + db.insert( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref(), + b"item1", + Element::new_item(vec![2]), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref(), + b"sum_item_1", + Element::new_sum_item(SumValue::MAX - 40), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref(), + b"sum_item_2", + Element::new_sum_item(30), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + db.insert( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref(), + b"ref_1", + Element::new_reference(ReferencePathType::AbsolutePathReference(vec![ + TEST_LEAF.to_vec(), + b"big_sum_tree".to_vec(), + b"sum_tree_1".to_vec(), + b"sum_item_1".to_vec(), + ])), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + db.insert( + [TEST_LEAF, 
b"big_sum_tree", b"sum_tree_2"].as_ref(), + b"sum_item_3", + Element::new_sum_item(SumValue::MAX - 50), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"big_sum_tree", None, grove_version) + .unwrap() + .expect("should fetch tree"); + assert_eq!( + sum_tree.big_sum_value_or_default(), + (SumValue::MAX - 10) as i128 + (SumValue::MAX - 50) as i128 + ); + + db.insert( + [TEST_LEAF, b"big_sum_tree"].as_ref(), + b"sum_item_4", + Element::new_sum_item(SumValue::MAX - 70), + None, + None, + grove_version, + ) + .unwrap() + .expect("should insert item"); + + let sum_tree = db + .get([TEST_LEAF].as_ref(), b"big_sum_tree", None, grove_version) + .unwrap() + .expect("should fetch tree"); + assert_eq!( + sum_tree.big_sum_value_or_default(), + (SumValue::MAX - 10) as i128 + + (SumValue::MAX - 50) as i128 + + (SumValue::MAX - 70) as i128 + ); + + let batch = StorageBatch::new(); + + // Assert node feature types + let test_leaf_merk = db + .open_non_transactional_merk_at_path( + [TEST_LEAF].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert!(matches!( + test_leaf_merk + .get_feature_type( + b"big_sum_tree", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BasicMerkNode) + )); + + let parent_sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"big_sum_tree"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + let feature_type = parent_sum_tree + .get_feature_type( + b"sum_tree_1", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type, + BigSummedMerkNode((SumValue::MAX - 10) as i128) + ); + + let feature_type = parent_sum_tree + .get_feature_type( + 
b"sum_item_4", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .unwrap() + .expect("node should exist") + .expect("expected feature type"); + assert_eq!( + feature_type, + BigSummedMerkNode((SumValue::MAX - 70) as i128) + ); + + let child_sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + child_sum_tree + .get_feature_type( + b"item1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) + ); + assert_eq!( + child_sum_tree + .get_feature_type( + b"sum_item_1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(SumValue::MAX - 40)) + ); + assert_eq!( + child_sum_tree + .get_feature_type( + b"sum_item_2", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(30)) + ); + + assert_eq!( + child_sum_tree + .get_feature_type( + b"ref_1", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) + ); + + let child_sum_tree_2 = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"big_sum_tree", b"sum_tree_2"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + + assert_eq!( + child_sum_tree_2 + .get_feature_type( + b"sum_item_3", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(SumValue::MAX - 50)) + ); + } + + #[test] + fn test_sum_tree_with_batches() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let ops = vec![ + 
QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec()], + b"key1".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"a".to_vec(), + Element::new_item(vec![214]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"b".to_vec(), + Element::new_sum_item(10), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + + assert_eq!( + sum_tree + .get_feature_type( + b"a", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(0)) + ); + assert_eq!( + sum_tree + .get_feature_type( + b"b", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(10)) + ); + + // Create new batch to use existing tree + let ops = vec![QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"c".to_vec(), + Element::new_sum_item(10), + )]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + sum_tree + .get_feature_type( + b"c", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(SummedMerkNode(10)) + ); + assert_eq!( + sum_tree.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(20) + ); + + // Test 
propagation + // Add a new sum tree with its own sum items, should affect sum of original + // tree + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"d".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"first".to_vec(), + Element::new_sum_item(4), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"e".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"first".to_vec(), + Element::new_sum_item(12), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"third".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"a".to_vec(), + Element::new_sum_item(5), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"b".to_vec(), + Element::new_item(vec![5]), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + sum_tree.aggregate_data().expect("expected to get sum"), + AggregateData::Sum(41) + ); + 
} + + #[test] + fn test_big_sum_tree_with_batches() { + let grove_version = GroveVersion::latest(); + let db = make_test_grovedb(grove_version); + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec()], + b"key1".to_vec(), + Element::empty_big_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"a".to_vec(), + Element::new_item(vec![214]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"b".to_vec(), + Element::new_sum_item(10), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + + assert_eq!( + sum_tree + .get_feature_type( + b"a", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BigSummedMerkNode(0)) + ); + assert_eq!( + sum_tree + .get_feature_type( + b"b", + true, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .unwrap() + .expect("node should exist"), + Some(BigSummedMerkNode(10)) + ); + + // Create new batch to use existing tree + let ops = vec![QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"c".to_vec(), + Element::new_sum_item(10), + )]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + sum_tree + .get_feature_type( + b"c", + true, + None::<&fn(&[u8], &GroveVersion) -> Option>, + grove_version + ) + .unwrap() + 
.expect("node should exist"), + Some(BigSummedMerkNode(10)) + ); + assert_eq!( + sum_tree.aggregate_data().expect("expected to get sum"), + AggregateData::BigSum(20) + ); + + // Test propagation + // Add a new sum tree with its own sum items, should affect sum of original + // tree + let ops = vec![ + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"d".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"first".to_vec(), + Element::new_sum_item(4), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"d".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec()], + b"e".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"first".to_vec(), + Element::new_sum_item(12), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"second".to_vec(), + Element::new_item(vec![4]), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![TEST_LEAF.to_vec(), b"key1".to_vec(), b"e".to_vec()], + b"third".to_vec(), + Element::empty_sum_tree(), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"a".to_vec(), + Element::new_sum_item(5), + ), + QualifiedGroveDbOp::insert_or_replace_op( + vec![ + TEST_LEAF.to_vec(), + b"key1".to_vec(), + b"e".to_vec(), + b"third".to_vec(), + ], + b"b".to_vec(), + Element::new_item(vec![5]), + ), + ]; + db.apply_batch(ops, None, None, grove_version) + .unwrap() + .expect("should apply batch"); + + let batch = StorageBatch::new(); + let sum_tree = db + .open_non_transactional_merk_at_path( + [TEST_LEAF, b"key1"].as_ref().into(), + 
Some(&batch), + grove_version, + ) + .unwrap() + .expect("should open tree"); + assert_eq!( + sum_tree.aggregate_data().expect("expected to get sum"), + AggregateData::BigSum(41) + ); + } } diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index b9b624a4..20ec46d8 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -28,7 +28,7 @@ macro_rules! storage_context_with_parent_optional_tx { $transaction:ident, $storage:ident, $root_key:ident, - $is_sum_tree:ident, + $tree_type:ident, $grove_version:ident, { $($body:tt)* } ) => { @@ -54,24 +54,14 @@ macro_rules! storage_context_with_parent_optional_tx { ) }) ); - match element { - Element::Tree(root_key, _) => { - let $root_key = root_key; - let $is_sum_tree = false; - $($body)* - } - Element::SumTree(root_key, ..) => { - let $root_key = root_key; - let $is_sum_tree = true; - $($body)* - } - _ => { - return Err(Error::CorruptedData( + let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else + { + return Err(Error::CorruptedData( "parent is not a tree" .to_owned(), )).wrap_with_cost($cost); - } - } + }; + $($body)* } else { return Err(Error::CorruptedData( "path is empty".to_owned(), @@ -95,24 +85,14 @@ macro_rules! storage_context_with_parent_optional_tx { ) }) ); - match element { - Element::Tree(root_key, _) => { - let $root_key = root_key; - let $is_sum_tree = false; - $($body)* - } - Element::SumTree(root_key, ..) => { - let $root_key = root_key; - let $is_sum_tree = true; - $($body)* - } - _ => { - return Err(Error::CorruptedData( + let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else + { + return Err(Error::CorruptedData( "parent is not a tree" .to_owned(), )).wrap_with_cost($cost); - } - } + }; + $($body)* } else { return Err(Error::CorruptedData( "path is empty".to_owned(), @@ -134,7 +114,7 @@ macro_rules! 
storage_context_with_parent_optional_tx_internal_error { $transaction:ident, $storage:ident, $root_key:ident, - $is_sum_tree:ident, + $tree_type:ident, $grove_version:ident, { $($body:tt)* } ) => { @@ -162,24 +142,15 @@ macro_rules! storage_context_with_parent_optional_tx_internal_error { }).unwrap_add_cost(&mut $cost); match result { Ok(element) => { - match element { - Element::Tree(root_key, _) => { - let $root_key = root_key; - let $is_sum_tree = false; - $($body)* - } - Element::SumTree(root_key, ..) => { - let $root_key = root_key; - let $is_sum_tree = true; - $($body)* - } - _ => { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - } - } + let Some(($root_key, $tree_type)) + = element.root_key_and_tree_type_owned() else + { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + }; + $($body)* }, Err(e) => Err(e), } @@ -210,24 +181,15 @@ macro_rules! storage_context_with_parent_optional_tx_internal_error { }).unwrap_add_cost(&mut $cost); match result { Ok(element) => { - match element { - Element::Tree(root_key, _) => { - let $root_key = root_key; - let $is_sum_tree = false; - $($body)* - } - Element::SumTree(root_key, ..) => { - let $root_key = root_key; - let $is_sum_tree = true; - $($body)* - } - _ => { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - } - } + let Some(($root_key, $tree_type)) + = element.root_key_and_tree_type_owned() else + { + return Err(Error::CorruptedData( + "parent is not a tree" + .to_owned(), + )).wrap_with_cost($cost); + }; + $($body)* }, Err(e) => Err(e), } @@ -293,7 +255,7 @@ macro_rules! merk_optional_tx { &mut $cost, ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), $grove_version, ).map(|merk_res| @@ -315,7 +277,7 @@ macro_rules! 
merk_optional_tx { $transaction, storage, root_key, - is_sum_tree, + tree_type, $grove_version, { #[allow(unused_mut)] @@ -324,7 +286,7 @@ macro_rules! merk_optional_tx { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), $grove_version, ).map(|merk_res| @@ -367,7 +329,7 @@ macro_rules! merk_optional_tx_internal_error { &mut $cost, ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), $grove_version ).map(|merk_res| @@ -389,7 +351,7 @@ macro_rules! merk_optional_tx_internal_error { $transaction, storage, root_key, - is_sum_tree, + tree_type, $grove_version, { #[allow(unused_mut)] @@ -398,7 +360,7 @@ macro_rules! merk_optional_tx_internal_error { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), $grove_version, ).map(|merk_res| @@ -438,7 +400,7 @@ macro_rules! merk_optional_tx_path_not_empty { $transaction, storage, root_key, - is_sum_tree, + tree_type, $grove_version, { #[allow(unused_mut)] @@ -447,7 +409,7 @@ macro_rules! merk_optional_tx_path_not_empty { ::grovedb_merk::Merk::open_layered_with_root_key( storage, root_key, - is_sum_tree, + tree_type, Some(&Element::value_defined_cost_for_serialized_value), $grove_version, ).map(|merk_res| @@ -489,7 +451,7 @@ macro_rules! 
root_merk_optional_tx { &mut $cost, ::grovedb_merk::Merk::open_base( storage.unwrap_add_cost(&mut $cost), - false, + TreeType::NormalTree, Some(&Element::value_defined_cost_for_serialized_value), $grove_version, ).map(|merk_res| diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 39cf3432..8fdccc7e 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -95,6 +95,39 @@ impl Visualize for Element { drawer = root_key.as_deref().visualize(drawer)?; drawer.write(format!(" {value}").as_bytes())?; + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } + } + Element::BigSumTree(root_key, value, flags) => { + drawer.write(b"big_sum_tree: ")?; + drawer = root_key.as_deref().visualize(drawer)?; + drawer.write(format!(" {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } + } + Element::CountTree(root_key, value, flags) => { + drawer.write(b"count_tree: ")?; + drawer = root_key.as_deref().visualize(drawer)?; + drawer.write(format!(" {value}").as_bytes())?; + + if let Some(f) = flags { + if !f.is_empty() { + drawer = f.visualize(drawer)?; + } + } + } + Element::CountSumTree(root_key, count_value, sum_value, flags) => { + drawer.write(b"count_sum_tree: ")?; + drawer = root_key.as_deref().visualize(drawer)?; + drawer.write(format!("count: {count_value}, sum {sum_value}").as_bytes())?; + if let Some(f) = flags { if !f.is_empty() { drawer = f.visualize(drawer)?; diff --git a/grovedbg-types/Cargo.toml b/grovedbg-types/Cargo.toml index 7c5eb549..357ce04a 100644 --- a/grovedbg-types/Cargo.toml +++ b/grovedbg-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedbg-types" -version = "2.2.1" +version = "3.0.0" edition = "2021" description = "Common type definitions for data exchange over GroveDBG protocol" authors = ["Evgeny Fomin "] diff --git a/grovedbg-types/src/lib.rs b/grovedbg-types/src/lib.rs index dd9fc007..fb9c6d90 100644 --- 
a/grovedbg-types/src/lib.rs +++ b/grovedbg-types/src/lib.rs @@ -125,6 +125,28 @@ pub enum Element { #[serde_as(as = "Option")] element_flags: Option>, }, + BigSumTree { + #[serde_as(as = "Option")] + root_key: Option, + sum: i128, + #[serde_as(as = "Option")] + element_flags: Option>, + }, + CountTree { + #[serde_as(as = "Option")] + root_key: Option, + count: u64, + #[serde_as(as = "Option")] + element_flags: Option>, + }, + CountSumTree { + #[serde_as(as = "Option")] + root_key: Option, + count: u64, + sum: i64, + #[serde_as(as = "Option")] + element_flags: Option>, + }, Item { #[serde_as(as = "Base64")] value: Vec, @@ -261,6 +283,9 @@ pub enum MerkProofNode { pub enum TreeFeatureType { BasicMerkNode, SummedMerkNode(i64), + BigSummedMerkNode(i128), + CountedMerkNode(u64), + CountedSummedMerkNode(u64, i64), } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 903c61d7..07d0b930 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "grovedb-merk" description = "Merkle key/value store adapted for GroveDB" -version = "2.2.1" +version = "3.0.0" authors = ["Samuel Westrich ", "Wisdom Ogwu ", "Matt Bell "] edition = "2021" license = "MIT" @@ -11,11 +11,11 @@ readme = "README.md" documentation = "https://docs.rs/grovedb-merk" [dependencies] -grovedb-costs = { version = "2.2.1" , path = "../costs" } -grovedb-path = { version = "2.2.1", path = "../path" } -grovedb-storage = { version = "2.2.1", path = "../storage", optional = true } -grovedb-version = { version = "2.2.1", path = "../grovedb-version" } -grovedb-visualize = { version = "2.2.1", path = "../visualize" } +grovedb-costs = { version = "3.0.0" , path = "../costs" } +grovedb-path = { version = "3.0.0", path = "../path" } +grovedb-storage = { version = "3.0.0", path = "../storage", optional = true } +grovedb-version = { version = "3.0.0", path = "../grovedb-version" } +grovedb-visualize = { version = "3.0.0", path = 
"../visualize" } bincode = { version = "2.0.0-rc.3" } hex = "0.4.3" @@ -24,6 +24,7 @@ integer-encoding = "4.0.0" thiserror = "2.0.11" serde = { version = "1.0.210", features = ["derive"], optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } +byteorder = { version = "1.5.0" } [dependencies.colored] version = "3.0.0" diff --git a/merk/src/error.rs b/merk/src/error.rs index 8fdc1cfc..6eb24385 100644 --- a/merk/src/error.rs +++ b/merk/src/error.rs @@ -117,6 +117,12 @@ pub enum Error { #[error(transparent)] /// Version error VersionError(grovedb_version::error::GroveVersionError), + + #[error("big sum tree under normal sum tree error {0}")] + BigSumTreeUnderNormalSumTree(String), + + #[error("unknown tree type {0}")] + UnknownTreeType(String), } impl From for Error { diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 6e877efe..3b535767 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -3,6 +3,8 @@ #[cfg(feature = "minimal")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; #[cfg(feature = "minimal")] +use grovedb_version::{check_grovedb_v0_or_v1, error::GroveVersionError, version::GroveVersion}; +#[cfg(feature = "minimal")] use integer_encoding::VarInt; #[cfg(feature = "minimal")] @@ -12,6 +14,7 @@ use crate::{ tree::{kv::KV, Link, TreeNode}, HASH_BLOCK_SIZE, HASH_BLOCK_SIZE_U32, HASH_LENGTH, HASH_LENGTH_U32, }; +use crate::{merk::NodeType, tree_type::TreeType}; #[cfg(feature = "minimal")] /// Average key size @@ -38,26 +41,88 @@ pub enum EstimatedSumTrees { SomeSumTrees { /// Sum trees weight sum_trees_weight: Weight, + /// Big Sum trees weight + big_sum_trees_weight: Weight, + /// Count trees weight + count_trees_weight: Weight, + /// Count Sum trees weight + count_sum_trees_weight: Weight, /// Non sum trees weight non_sum_trees_weight: Weight, }, /// All sum trees AllSumTrees, + /// All big sum 
trees + AllBigSumTrees, + /// All count trees + AllCountTrees, + /// All count sum trees + AllCountSumTrees, } -#[cfg(feature = "minimal")] #[cfg(feature = "minimal")] impl EstimatedSumTrees { - fn estimated_size(&self) -> Result { + fn estimated_size(&self, grove_version: &GroveVersion) -> Result { + let version = check_grovedb_v0_or_v1!( + "EstimatedSumTrees::estimated_size", + grove_version + .merk_versions + .average_case_costs + .sum_tree_estimated_size + ); match self { EstimatedSumTrees::NoSumTrees => Ok(0), EstimatedSumTrees::SomeSumTrees { sum_trees_weight, + big_sum_trees_weight, + count_trees_weight, + count_sum_trees_weight, non_sum_trees_weight, - } => (*non_sum_trees_weight as u32 * 9) - .checked_div(*sum_trees_weight as u32 + *non_sum_trees_weight as u32) - .ok_or(Error::DivideByZero("weights add up to 0")), - EstimatedSumTrees::AllSumTrees => Ok(8), + } => { + // Example calculation including new weights + let total_weight = *sum_trees_weight as u32 + + *big_sum_trees_weight as u32 + + *count_trees_weight as u32 + + *count_sum_trees_weight as u32 + + *non_sum_trees_weight as u32; + if total_weight == 0 { + return Err(Error::DivideByZero("weights add up to 0")); + }; + if version == 0 { + Ok((*non_sum_trees_weight as u32 * 9) + / (*sum_trees_weight as u32 + *non_sum_trees_weight as u32)) + } else if version == 1 { + let estimated_size = (*sum_trees_weight as u32 + * TreeType::SumTree.inner_node_type().cost()) + .checked_add( + *big_sum_trees_weight as u32 + * TreeType::BigSumTree.inner_node_type().cost(), + ) + .and_then(|sum| { + sum.checked_add( + *count_trees_weight as u32 + * TreeType::CountTree.inner_node_type().cost(), + ) + }) + .and_then(|sum| { + sum.checked_add( + *count_sum_trees_weight as u32 + * TreeType::CountSumTree.inner_node_type().cost(), + ) + }) + .ok_or(Error::Overflow("Estimated size calculation overflowed"))?; + + Ok(estimated_size / total_weight) + } else { + Err(Error::CorruptedCodeExecution("we already checked versions")) + 
} + } + EstimatedSumTrees::AllSumTrees => Ok(TreeType::SumTree.inner_node_type().cost()), + EstimatedSumTrees::AllBigSumTrees => Ok(TreeType::BigSumTree.inner_node_type().cost()), + EstimatedSumTrees::AllCountTrees => Ok(TreeType::CountTree.inner_node_type().cost()), + EstimatedSumTrees::AllCountSumTrees => { + Ok(TreeType::CountSumTree.inner_node_type().cost()) + } } } } @@ -126,19 +191,26 @@ impl EstimatedLayerSizes { /// Returns the size of a subtree's feature and flags /// This only takes into account subtrees in the estimated layer info /// Only should be used when it is known to be a subtree - pub fn subtree_with_feature_and_flags_size(&self) -> Result { + pub fn subtree_with_feature_and_flags_size( + &self, + grove_version: &GroveVersion, + ) -> Result { match self { EstimatedLayerSizes::AllSubtrees(_, estimated_sum_trees, flags_size) => { // 1 for enum type // 1 for empty // 1 for flags size - Ok(estimated_sum_trees.estimated_size()? + flags_size.unwrap_or_default() + 3) + Ok(estimated_sum_trees.estimated_size(grove_version)? + + flags_size.unwrap_or_default() + + 3) } EstimatedLayerSizes::Mix { subtrees_size, .. } => match subtrees_size { None => Err(Error::WrongEstimatedCostsElementTypeForLevel( "this layer is a mix but doesn't have subtrees", )), - Some((_, est, fs, _)) => Ok(est.estimated_size()? + fs.unwrap_or_default() + 3), + Some((_, est, fs, _)) => { + Ok(est.estimated_size(grove_version)? 
+ fs.unwrap_or_default() + 3) + } }, _ => Err(Error::WrongEstimatedCostsElementTypeForLevel( "this layer needs to have trees", @@ -147,7 +219,10 @@ impl EstimatedLayerSizes { } /// Returns the size of a value's feature and flags - pub fn value_with_feature_and_flags_size(&self) -> Result { + pub fn value_with_feature_and_flags_size( + &self, + grove_version: &GroveVersion, + ) -> Result { match self { EstimatedLayerSizes::AllItems(_, average_value_size, flags_size) => { // 1 for enum type @@ -166,7 +241,9 @@ impl EstimatedLayerSizes { // 1 for enum type // 1 for empty // 1 for flags size - Ok(estimated_sum_trees.estimated_size()? + flags_size.unwrap_or_default() + 3) + Ok(estimated_sum_trees.estimated_size(grove_version)? + + flags_size.unwrap_or_default() + + 3) } EstimatedLayerSizes::Mix { subtrees_size, @@ -186,7 +263,7 @@ impl EstimatedLayerSizes { let (subtree_size, subtree_weight) = match subtrees_size { None => None, Some((_, est, fs, weight)) => Some(( - est.estimated_size()? + fs.unwrap_or_default() + 3, + est.estimated_size(grove_version)? + fs.unwrap_or_default() + 3, *weight as u32, )), } @@ -234,8 +311,8 @@ pub type EstimatedToBeEmpty = bool; #[derive(Clone, Copy, PartialEq, Eq, Debug)] /// Information on an estimated layer pub struct EstimatedLayerInformation { - /// Is sum tree? 
- pub is_sum_tree: bool, + /// The kind of tree we are in + pub tree_type: TreeType, /// Estimated layer count pub estimated_layer_count: EstimatedLayerCount, /// Estimated layer sizes @@ -291,13 +368,13 @@ impl TreeNode { pub fn average_case_encoded_tree_size( not_prefixed_key_len: u32, estimated_element_size: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { // two option values for the left and right link // the actual left and right link encoding size // the encoded kv node size - 2 + (2 * Link::encoded_link_size(not_prefixed_key_len, is_sum_node)) - + KV::encoded_kv_node_size(estimated_element_size, is_sum_node) + 2 + (2 * Link::encoded_link_size(not_prefixed_key_len, node_type)) + + KV::encoded_kv_node_size(estimated_element_size, node_type) } } @@ -307,7 +384,7 @@ pub fn add_average_case_get_merk_node( cost: &mut OperationCost, not_prefixed_key_len: u32, approximate_element_size: u32, - is_sum_tree: bool, + node_type: NodeType, ) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. 
@@ -318,7 +395,7 @@ pub fn add_average_case_get_merk_node( cost.storage_loaded_bytes += TreeNode::average_case_encoded_tree_size( not_prefixed_key_len, approximate_element_size, - is_sum_tree, + node_type, ) as u64; Ok(()) } @@ -340,11 +417,11 @@ pub fn add_average_case_merk_replace_layered( cost: &mut OperationCost, key_len: u32, value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) { cost.seek_count += 1; cost.storage_cost.replaced_bytes = - KV::layered_value_byte_cost_size_for_key_and_value_lengths(key_len, value_len, is_sum_node); + KV::layered_value_byte_cost_size_for_key_and_value_lengths(key_len, value_len, node_type); // first lets add the value hash cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); @@ -394,9 +471,12 @@ pub fn add_average_case_merk_root_hash(cost: &mut OperationCost) { #[cfg(feature = "minimal")] /// Average case cost of propagating a merk -pub fn average_case_merk_propagate(input: &EstimatedLayerInformation) -> CostResult<(), Error> { +pub fn average_case_merk_propagate( + input: &EstimatedLayerInformation, + grove_version: &GroveVersion, +) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - add_average_case_merk_propagate(&mut cost, input).wrap_with_cost(cost) + add_average_case_merk_propagate(&mut cost, input, grove_version).wrap_with_cost(cost) } #[cfg(feature = "minimal")] @@ -404,16 +484,310 @@ pub fn average_case_merk_propagate(input: &EstimatedLayerInformation) -> CostRes pub fn add_average_case_merk_propagate( cost: &mut OperationCost, input: &EstimatedLayerInformation, + grove_version: &GroveVersion, +) -> Result<(), Error> { + match grove_version + .merk_versions + .average_case_costs + .add_average_case_merk_propagate + { + 0 => add_average_case_merk_propagate_v0(cost, input, grove_version), + 1 => add_average_case_merk_propagate_v1(cost, input, grove_version), + version => Err(Error::VersionError( + GroveVersionError::UnknownVersionMismatch { + method: 
"add_average_case_merk_propagate".to_string(), + known_versions: vec![0, 1], + received: version, + }, + )), + } +} +#[cfg(feature = "minimal")] +/// Add average case cost for propagating a merk +fn add_average_case_merk_propagate_v1( + cost: &mut OperationCost, + input: &EstimatedLayerInformation, + grove_version: &GroveVersion, +) -> Result<(), Error> { + let mut nodes_updated = 0; + // Propagation requires to recompute and write hashes up to the root + let EstimatedLayerInformation { + tree_type, + estimated_layer_count, + estimated_layer_sizes, + } = input; + let levels = estimated_layer_count.estimate_levels(); + nodes_updated += levels; + + if levels > 1 { + // we can get about 1 rotation, if there are more than 2 levels + nodes_updated += 1; + } + cost.seek_count += nodes_updated as u32; + + cost.hash_node_calls += nodes_updated * 2; + + cost.storage_cost.replaced_bytes += match estimated_layer_sizes { + EstimatedLayerSizes::AllSubtrees( + average_key_size, + estimated_sum_trees, + average_flags_size, + ) => { + // it is normal to have LAYER_COST_SIZE here, as we add estimated sum tree + // additions right after + let value_len = LAYER_COST_SIZE + + average_flags_size + .map_or(0, |flags_len| flags_len + flags_len.required_space() as u32); + // in order to simplify calculations we get the estimated size and remove the + // cost for the basic merk + let sum_tree_addition = estimated_sum_trees.estimated_size(grove_version)?; + nodes_updated + * (KV::layered_value_byte_cost_size_for_key_and_value_lengths( + *average_key_size as u32, + value_len, + tree_type.inner_node_type(), + ) + sum_tree_addition) + } + EstimatedLayerSizes::AllItems(average_key_size, average_item_size, average_flags_size) + | EstimatedLayerSizes::AllReference( + average_key_size, + average_item_size, + average_flags_size, + ) => { + let flags_len = average_flags_size.unwrap_or(0); + let average_value_len = average_item_size + flags_len; + nodes_updated + * 
KV::value_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + average_value_len, + tree_type.inner_node_type(), + ) + } + EstimatedLayerSizes::Mix { + subtrees_size, + items_size, + references_size, + } => { + let total_weight = subtrees_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default() + + items_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default() + + references_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default(); + if total_weight == 0 { + 0 + } else { + let weighted_nodes_updated = (nodes_updated as u64) + .checked_mul(total_weight as u64) + .ok_or(Error::Overflow("overflow for weights average cost"))?; + let tree_node_updates_cost = match subtrees_size { + None => 0, + Some((average_key_size, estimated_sum_trees, average_flags_size, weight)) => { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = LAYER_COST_SIZE + flags_len; + let sum_tree_addition = + estimated_sum_trees.estimated_size(grove_version)?; + let cost = KV::layered_value_byte_cost_size_for_key_and_value_lengths( + *average_key_size as u32, + value_len, + tree_type.inner_node_type(), + ) + sum_tree_addition; + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed tree nodes updates"))? + } + }; + let item_node_updates_cost = match items_size { + None => 0, + Some((average_key_size, average_value_size, average_flags_size, weight)) => { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = average_value_size + flags_len; + let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + value_len, + tree_type.inner_node_type(), + ); + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed item nodes updates"))? 
+ } + }; + let reference_node_updates_cost = match references_size { + None => 0, + Some((average_key_size, average_value_size, average_flags_size, weight)) => { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = average_value_size + flags_len; + let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + value_len, + tree_type.inner_node_type(), + ); + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed item nodes updates"))? + } + }; + + let total_updates_cost = tree_node_updates_cost + .checked_add(item_node_updates_cost) + .and_then(|c| c.checked_add(reference_node_updates_cost)) + .ok_or(Error::Overflow("overflow for mixed item adding parts"))?; + let total_replaced_bytes = total_updates_cost / weighted_nodes_updated; + if total_replaced_bytes > u32::MAX as u64 { + return Err(Error::Overflow( + "overflow for total replaced bytes more than u32 max", + )); + } + total_replaced_bytes as u32 + } + } + }; + cost.storage_loaded_bytes += match estimated_layer_sizes { + EstimatedLayerSizes::AllSubtrees( + average_key_size, + estimated_sum_trees, + average_flags_size, + ) => { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = LAYER_COST_SIZE + flags_len; + let sum_tree_addition = estimated_sum_trees.estimated_size(grove_version)?; + nodes_updated + * KV::layered_node_byte_cost_size_for_key_and_value_lengths( + *average_key_size as u32, + value_len + sum_tree_addition, + tree_type.inner_node_type(), + ) + } + EstimatedLayerSizes::AllItems(average_key_size, average_item_size, average_flags_size) + | EstimatedLayerSizes::AllReference( + average_key_size, + average_item_size, + average_flags_size, + ) => { + let flags_len = average_flags_size.unwrap_or(0); + let average_value_len = average_item_size + flags_len; + nodes_updated + * KV::node_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + average_value_len, + 
tree_type.inner_node_type(), + ) + } + EstimatedLayerSizes::Mix { + subtrees_size, + items_size, + references_size, + } => { + let total_weight = subtrees_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default() + + items_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default() + + references_size + .as_ref() + .map(|(_, _, _, weight)| *weight as u32) + .unwrap_or_default(); + if total_weight == 0 { + 0 + } else { + let weighted_nodes_updated = (nodes_updated as u64) + .checked_mul(total_weight as u64) + .ok_or(Error::Overflow("overflow for weights average cost"))?; + let tree_node_updates_cost = subtrees_size + .as_ref() + .map( + |(average_key_size, estimated_sum_trees, average_flags_size, weight)| { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = LAYER_COST_SIZE + flags_len; + let sum_tree_addition = + estimated_sum_trees.estimated_size(grove_version)?; + let cost = KV::layered_node_byte_cost_size_for_key_and_value_lengths( + *average_key_size as u32, + value_len + sum_tree_addition, + tree_type.inner_node_type(), + ); + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed tree nodes updates")) + }, + ) + .unwrap_or(Ok(0))?; + let item_node_updates_cost = items_size + .as_ref() + .map( + |(average_key_size, average_value_size, average_flags_size, weight)| { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = average_value_size + flags_len; + let cost = KV::node_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + value_len, + tree_type.inner_node_type(), + ); + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed item nodes updates")) + }, + ) + .unwrap_or(Ok(0))?; + let reference_node_updates_cost = references_size + .as_ref() + .map( + |(average_key_size, average_value_size, average_flags_size, weight)| { + let flags_len = average_flags_size.unwrap_or(0); + let value_len = 
average_value_size + flags_len; + let cost = KV::node_byte_cost_size_for_key_and_raw_value_lengths( + *average_key_size as u32, + value_len, + TreeType::NormalTree.inner_node_type(), + ); + (*weight as u64) + .checked_mul(cost as u64) + .ok_or(Error::Overflow("overflow for mixed item nodes updates")) + }, + ) + .unwrap_or(Ok(0))?; + + let total_updates_cost = tree_node_updates_cost + .checked_add(item_node_updates_cost) + .and_then(|c| c.checked_add(reference_node_updates_cost)) + .ok_or(Error::Overflow("overflow for mixed item adding parts"))?; + let total_loaded_bytes = total_updates_cost / weighted_nodes_updated; + if total_loaded_bytes > u32::MAX as u64 { + return Err(Error::Overflow( + "overflow for total replaced bytes more than u32 max", + )); + } + total_loaded_bytes as u32 + } + } + } as u64; + Ok(()) +} + +#[cfg(feature = "minimal")] +/// Add average case cost for propagating a merk +fn add_average_case_merk_propagate_v0( + cost: &mut OperationCost, + input: &EstimatedLayerInformation, + grove_version: &GroveVersion, ) -> Result<(), Error> { let mut nodes_updated = 0; // Propagation requires to recompute and write hashes up to the root let EstimatedLayerInformation { - is_sum_tree, + tree_type, estimated_layer_count, estimated_layer_sizes, } = input; let levels = estimated_layer_count.estimate_levels(); - let in_sum_tree = *is_sum_tree; nodes_updated += levels; if levels > 1 { @@ -437,12 +811,12 @@ pub fn add_average_case_merk_propagate( .map_or(0, |flags_len| flags_len + flags_len.required_space() as u32); // in order to simplify calculations we get the estimated size and remove the // cost for the basic merk - let sum_tree_addition = estimated_sum_trees.estimated_size()?; + let sum_tree_addition = estimated_sum_trees.estimated_size(grove_version)?; nodes_updated * (KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, - *is_sum_tree, + tree_type.inner_node_type(), ) + sum_tree_addition) } 
EstimatedLayerSizes::AllItems(average_key_size, average_item_size, average_flags_size) @@ -457,7 +831,7 @@ pub fn add_average_case_merk_propagate( * KV::value_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, average_value_len, - in_sum_tree, + tree_type.inner_node_type(), ) } EstimatedLayerSizes::Mix { @@ -488,11 +862,12 @@ pub fn add_average_case_merk_propagate( Some((average_key_size, estimated_sum_trees, average_flags_size, weight)) => { let flags_len = average_flags_size.unwrap_or(0); let value_len = LAYER_COST_SIZE + flags_len; - let sum_tree_addition = estimated_sum_trees.estimated_size()?; + let sum_tree_addition = + estimated_sum_trees.estimated_size(grove_version)?; let cost = KV::layered_value_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len, - in_sum_tree, + tree_type.inner_node_type(), ) + sum_tree_addition; (*weight as u64) .checked_mul(cost as u64) @@ -507,7 +882,7 @@ pub fn add_average_case_merk_propagate( let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, value_len, - in_sum_tree, + tree_type.inner_node_type(), ); (*weight as u64) .checked_mul(cost as u64) @@ -522,7 +897,7 @@ pub fn add_average_case_merk_propagate( let cost = KV::value_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, value_len, - in_sum_tree, + tree_type.inner_node_type(), ); (*weight as u64) .checked_mul(cost as u64) @@ -552,12 +927,12 @@ pub fn add_average_case_merk_propagate( ) => { let flags_len = average_flags_size.unwrap_or(0); let value_len = LAYER_COST_SIZE + flags_len; - let sum_tree_addition = estimated_sum_trees.estimated_size()?; + let sum_tree_addition = estimated_sum_trees.estimated_size(grove_version)?; nodes_updated * KV::layered_node_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len + sum_tree_addition, - in_sum_tree, + tree_type.inner_node_type(), ) } EstimatedLayerSizes::AllItems(average_key_size, average_item_size, 
average_flags_size) @@ -572,7 +947,7 @@ pub fn add_average_case_merk_propagate( * KV::node_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, average_value_len, - in_sum_tree, + tree_type.inner_node_type(), ) } EstimatedLayerSizes::Mix { @@ -604,11 +979,12 @@ pub fn add_average_case_merk_propagate( |(average_key_size, estimated_sum_trees, average_flags_size, weight)| { let flags_len = average_flags_size.unwrap_or(0); let value_len = LAYER_COST_SIZE + flags_len; - let sum_tree_addition = estimated_sum_trees.estimated_size()?; + let sum_tree_addition = + estimated_sum_trees.estimated_size(grove_version)?; let cost = KV::layered_node_byte_cost_size_for_key_and_value_lengths( *average_key_size as u32, value_len + sum_tree_addition, - in_sum_tree, + tree_type.inner_node_type(), ); (*weight as u64) .checked_mul(cost as u64) @@ -625,7 +1001,7 @@ pub fn add_average_case_merk_propagate( let cost = KV::node_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, value_len, - in_sum_tree, + tree_type.inner_node_type(), ); (*weight as u64) .checked_mul(cost as u64) @@ -642,7 +1018,7 @@ pub fn add_average_case_merk_propagate( let cost = KV::node_byte_cost_size_for_key_and_raw_value_lengths( *average_key_size as u32, value_len, - false, + tree_type.inner_node_type(), // this was changed in v1 ); (*weight as u64) .checked_mul(cost as u64) diff --git a/merk/src/estimated_costs/mod.rs b/merk/src/estimated_costs/mod.rs index 7648246b..0ef4d18c 100644 --- a/merk/src/estimated_costs/mod.rs +++ b/merk/src/estimated_costs/mod.rs @@ -5,6 +5,10 @@ use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] use integer_encoding::VarInt; +#[cfg(feature = "minimal")] +use crate::merk::NodeType; +#[cfg(feature = "minimal")] +use crate::tree_type::TreeType; #[cfg(feature = "minimal")] use crate::{tree::kv::KV, HASH_BLOCK_SIZE_U32, HASH_LENGTH_U32}; @@ -26,17 +30,36 @@ pub const LAYER_COST_SIZE: u32 = 3; /// The cost of a sum value pub const 
SUM_VALUE_EXTRA_COST: u32 = 9; +#[cfg(any(feature = "minimal", feature = "verify"))] +/// The cost of a count value +pub const COUNT_VALUE_EXTRA_COST: u32 = 9; + +#[cfg(any(feature = "minimal", feature = "verify"))] +/// The cost of a big sum value +pub const BIG_SUM_VALUE_EXTRA_COST: u32 = 16; + #[cfg(feature = "minimal")] /// The cost of a summed subtree layer /// This is the layer size + 9 for the encoded value pub const SUM_LAYER_COST_SIZE: u32 = LAYER_COST_SIZE + SUM_VALUE_EXTRA_COST; +#[cfg(feature = "minimal")] +/// The cost of a summed subtree layer +/// This is the layer size + 9 for the encoded value +pub const SUM_AND_COUNT_LAYER_COST_SIZE: u32 = + LAYER_COST_SIZE + SUM_VALUE_EXTRA_COST + COUNT_VALUE_EXTRA_COST; + +#[cfg(feature = "minimal")] +/// The cost of a summed subtree layer +/// This is the layer size + 16 for the encoded value +pub const BIG_SUM_LAYER_COST_SIZE: u32 = LAYER_COST_SIZE + BIG_SUM_VALUE_EXTRA_COST; + #[cfg(feature = "minimal")] impl KV { - fn encoded_kv_node_size(element_size: u32, is_sum_node: bool) -> u32 { + fn encoded_kv_node_size(element_size: u32, node_type: NodeType) -> u32 { // We always charge 8 bytes for the sum node (even though // it could theoretically be 9 bytes - let sum_node_feature_size = if is_sum_node { 9 } else { 1 }; + let sum_node_feature_size = node_type.feature_len(); // KV holds the state of a node // 32 bytes to encode the hash of the node // 32 bytes to encode the value hash @@ -51,13 +74,13 @@ pub fn add_cost_case_merk_insert( cost: &mut OperationCost, key_len: u32, value_len: u32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; cost.storage_cost.added_bytes += KV::node_byte_cost_size_for_key_and_raw_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); // .. 
and hash computation for the inserted element itself // first lets add the value hash @@ -75,13 +98,13 @@ pub fn add_cost_case_merk_insert_layered( cost: &mut OperationCost, key_len: u32, value_len: u32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; cost.storage_cost.added_bytes += KV::layered_node_byte_cost_size_for_key_and_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); // .. and hash computation for the inserted element itself // first lets add the value hash @@ -101,11 +124,11 @@ pub fn add_cost_case_merk_replace( cost: &mut OperationCost, key_len: u32, value_len: u32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; cost.storage_cost.added_bytes += - KV::node_value_byte_cost_size(key_len, value_len, in_tree_using_sums); + KV::node_value_byte_cost_size(key_len, value_len, in_tree_type.inner_node_type()); cost.storage_cost.replaced_bytes += KV::node_key_byte_cost_size(key_len); // .. and hash computation for the inserted element itself // first lets add the value hash @@ -124,13 +147,13 @@ pub fn add_cost_case_merk_replace_same_size( cost: &mut OperationCost, key_len: u32, value_len: u32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; cost.storage_cost.replaced_bytes += KV::node_byte_cost_size_for_key_and_raw_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); // .. and hash computation for the inserted element itself // first lets add the value hash @@ -148,13 +171,13 @@ pub fn add_cost_case_merk_replace_layered( cost: &mut OperationCost, key_len: u32, value_len: u32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; cost.storage_cost.replaced_bytes += KV::layered_node_byte_cost_size_for_key_and_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); // .. 
and hash computation for the inserted element itself // first lets add the value hash @@ -176,7 +199,7 @@ pub fn add_cost_case_merk_patch( key_len: u32, value_len: u32, change_in_bytes: i32, - in_tree_using_sums: bool, + in_tree_type: TreeType, ) { cost.seek_count += 1; if change_in_bytes >= 0 { @@ -185,12 +208,12 @@ pub fn add_cost_case_merk_patch( let old_byte_size = KV::node_byte_cost_size_for_key_and_raw_value_lengths( key_len, value_len - change_in_bytes as u32, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); let new_byte_size = KV::node_byte_cost_size_for_key_and_raw_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); cost.storage_cost.replaced_bytes += old_byte_size; @@ -199,7 +222,7 @@ pub fn add_cost_case_merk_patch( cost.storage_cost.replaced_bytes += KV::node_byte_cost_size_for_key_and_raw_value_lengths( key_len, value_len, - in_tree_using_sums, + in_tree_type.inner_node_type(), ); } diff --git a/merk/src/estimated_costs/worst_case_costs.rs b/merk/src/estimated_costs/worst_case_costs.rs index 3cbd6399..a9ef19e8 100644 --- a/merk/src/estimated_costs/worst_case_costs.rs +++ b/merk/src/estimated_costs/worst_case_costs.rs @@ -33,6 +33,8 @@ use std::cmp::Ordering; #[cfg(feature = "minimal")] use grovedb_costs::{CostResult, CostsExt, OperationCost}; +#[cfg(feature = "minimal")] +use crate::merk::NodeType; #[cfg(feature = "minimal")] use crate::{ error::Error, @@ -57,13 +59,13 @@ impl TreeNode { pub fn worst_case_encoded_tree_size( not_prefixed_key_len: u32, max_element_size: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { // two option values for the left and right link // the actual left and right link encoding size // the encoded kv node size - 2 + (2 * Link::encoded_link_size(not_prefixed_key_len, is_sum_node)) - + KV::encoded_kv_node_size(max_element_size, is_sum_node) + 2 + (2 * Link::encoded_link_size(not_prefixed_key_len, node_type)) + + KV::encoded_kv_node_size(max_element_size, 
node_type) } } @@ -73,7 +75,7 @@ pub fn add_worst_case_get_merk_node( cost: &mut OperationCost, not_prefixed_key_len: u32, max_element_size: u32, - is_sum_node: bool, + node_type: NodeType, ) -> Result<(), Error> { // Worst case scenario, the element is not already in memory. // One direct seek has to be performed to read the node from storage. @@ -82,7 +84,7 @@ pub fn add_worst_case_get_merk_node( // To write a node to disk, the left link, right link and kv nodes are encoded. // worst case, the node has both the left and right link present. cost.storage_loaded_bytes += - TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, is_sum_node) + TreeNode::worst_case_encoded_tree_size(not_prefixed_key_len, max_element_size, node_type) as u64; Ok(()) } @@ -104,10 +106,10 @@ pub fn add_worst_case_merk_insert( cost: &mut OperationCost, key_len: u32, value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) { cost.storage_cost.added_bytes += - KV::node_byte_cost_size_for_key_and_raw_value_lengths(key_len, value_len, is_sum_node); + KV::node_byte_cost_size_for_key_and_raw_value_lengths(key_len, value_len, node_type); // .. 
and hash computation for the inserted element itself // todo: verify this cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); @@ -119,12 +121,12 @@ pub fn add_worst_case_merk_replace_layered( cost: &mut OperationCost, key_len: u32, value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) { // todo: verify this cost.hash_node_calls += 1 + ((value_len - 1) / HASH_BLOCK_SIZE_U32); cost.storage_cost.replaced_bytes = - KV::layered_value_byte_cost_size_for_key_and_value_lengths(key_len, value_len, is_sum_node); + KV::layered_value_byte_cost_size_for_key_and_value_lengths(key_len, value_len, node_type); // 37 + 35 + key_len } diff --git a/merk/src/lib.rs b/merk/src/lib.rs index 0291314b..51d15afb 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -65,6 +65,8 @@ pub mod error; #[cfg(any(feature = "minimal", feature = "verify"))] pub mod estimated_costs; +#[cfg(any(feature = "minimal", feature = "verify"))] +pub mod tree_type; #[cfg(feature = "minimal")] mod visualize; @@ -79,12 +81,16 @@ pub use tree::{ }; #[cfg(any(feature = "minimal", feature = "verify"))] pub use tree::{CryptoHash, TreeFeatureType}; +#[cfg(any(feature = "minimal", feature = "verify"))] +pub use tree_type::MaybeTree; +#[cfg(any(feature = "minimal", feature = "verify"))] +pub use tree_type::TreeType; #[cfg(feature = "minimal")] pub use crate::merk::{ defaults::ROOT_KEY_KEY, prove::{ProofConstructionResult, ProofWithoutEncodingResult}, - IsSumTree, KVIterator, Merk, MerkType, RootHashKeyAndSum, + KVIterator, Merk, MerkType, RootHashKeyAndAggregateData, }; #[cfg(feature = "minimal")] pub use crate::visualize::VisualizeableMerk; diff --git a/merk/src/merk/apply.rs b/merk/src/merk/apply.rs index 9c5c9ec9..e524bac6 100644 --- a/merk/src/merk/apply.rs +++ b/merk/src/merk/apply.rs @@ -11,6 +11,7 @@ use grovedb_storage::StorageContext; use grovedb_version::version::GroveVersion; use crate::{ + merk::NodeType, tree::{ kv::{ValueDefinedCostType, KV}, AuxMerkBatch, Walker, @@ -64,7 +65,7 @@ 
where KB: AsRef<[u8]>, KA: AsRef<[u8]>, { - let use_sum_nodes = self.is_sum_tree; + let node_type: NodeType = self.tree_type.inner_node_type(); self.apply_with_costs_just_in_time_value_update( batch, aux, @@ -73,7 +74,7 @@ where Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( key.len() as u32, value.len() as u32, - use_sum_nodes, + node_type, )) }, None::<&fn(&[u8], &GroveVersion) -> Option>, diff --git a/merk/src/merk/committer.rs b/merk/src/merk/committer.rs index 9fb02987..49e4fbdc 100644 --- a/merk/src/merk/committer.rs +++ b/merk/src/merk/committer.rs @@ -44,7 +44,8 @@ impl Commit for MerkCommitter { let right_child_sizes = tree.child_ref_and_sum_size(false); self.batch.push(( tree.key().to_vec(), - tree.feature_type().sum_length(), + tree.feature_type() + .tree_feature_specialized_type_and_length(), Some((buf, left_child_sizes, right_child_sizes)), storage_costs, )); diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index ee0deccc..27d679c1 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -52,7 +52,7 @@ use committer::MerkCommitter; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_default, cost_return_on_error_no_add, storage_cost::key_value_cost::KeyValueStorageCost, ChildrenSizesWithValue, CostContext, - CostResult, CostsExt, FeatureSumLength, OperationCost, + CostResult, CostsExt, FeatureSumLength, OperationCost, TreeCostType, }; use grovedb_storage::{self, Batch, RawIterator, StorageContext}; use grovedb_version::version::GroveVersion; @@ -70,8 +70,10 @@ use crate::{ Query, }, tree::{ - kv::ValueDefinedCostType, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, NULL_HASH, + kv::ValueDefinedCostType, AggregateData, AuxMerkBatch, CryptoHash, Op, RefWalker, TreeNode, + NULL_HASH, }, + tree_type::TreeType, Error::{CostsError, EdError, StorageError}, Link, MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, @@ -105,16 +107,13 @@ impl KeyUpdates { /// Type alias for simple function signature pub type 
BatchValue = ( Vec, - Option, + Option<(TreeCostType, FeatureSumLength)>, ChildrenSizesWithValue, KeyValueStorageCost, ); -/// A bool type -pub type IsSumTree = bool; - /// Root hash key and sum -pub type RootHashKeyAndSum = (CryptoHash, Option>, Option); +pub type RootHashKeyAndAggregateData = (CryptoHash, Option>, AggregateData); /// KVIterator allows you to lazily iterate over each kv pair of a subtree pub struct KVIterator<'a, I: RawIterator> { @@ -243,6 +242,38 @@ impl MerkType { } } +#[cfg(any(feature = "minimal", feature = "verify"))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum NodeType { + NormalNode, + SumNode, + BigSumNode, + CountNode, + CountSumNode, +} + +impl NodeType { + pub const fn feature_len(&self) -> u32 { + match self { + NodeType::NormalNode => 1, + NodeType::SumNode => 9, + NodeType::BigSumNode => 17, + NodeType::CountNode => 9, + NodeType::CountSumNode => 17, + } + } + + pub const fn cost(&self) -> u32 { + match self { + NodeType::NormalNode => 0, + NodeType::SumNode => 8, + NodeType::BigSumNode => 16, + NodeType::CountNode => 8, + NodeType::CountSumNode => 16, + } + } +} + /// A handle to a Merkle key/value store backed by RocksDB. pub struct Merk { pub(crate) tree: Cell>, @@ -251,8 +282,8 @@ pub struct Merk { pub storage: S, /// Merk type pub merk_type: MerkType, - /// Is sum tree? 
- pub is_sum_tree: bool, + /// The tree type + pub tree_type: TreeType, } impl fmt::Debug for Merk { @@ -265,7 +296,7 @@ impl fmt::Debug for Merk { pub type UseTreeMutResult = CostResult< Vec<( Vec, - Option, + Option<(TreeCostType, FeatureSumLength)>, ChildrenSizesWithValue, KeyValueStorageCost, )>, @@ -295,11 +326,11 @@ where res } - /// Returns the total sum value in the Merk tree - pub fn sum(&self) -> Result, Error> { + /// Returns the total aggregate data in the Merk tree + pub fn aggregate_data(&self) -> Result { self.use_tree(|tree| match tree { - None => Ok(None), - Some(tree) => tree.sum(), + None => Ok(AggregateData::NoAggregateData), + Some(tree) => tree.aggregate_data(), }) } @@ -315,13 +346,16 @@ where } /// Returns the root hash and non-prefixed key of the tree. - pub fn root_hash_key_and_sum(&self) -> CostResult { + pub fn root_hash_key_and_aggregate_data( + &self, + ) -> CostResult { self.use_tree(|tree| match tree { - None => Ok((NULL_HASH, None, None)).wrap_with_cost(Default::default()), + None => Ok((NULL_HASH, None, AggregateData::NoAggregateData)) + .wrap_with_cost(Default::default()), Some(tree) => { - let sum = cost_return_on_error_default!(tree.sum()); + let aggregate_data = cost_return_on_error_default!(tree.aggregate_data()); tree.hash() - .map(|hash| Ok((hash, Some(tree.key().to_vec()), sum))) + .map(|hash| Ok((hash, Some(tree.key().to_vec()), aggregate_data))) } }) } @@ -663,21 +697,28 @@ where skip_sum_checks: bool, grove_version: &GroveVersion, ) { - let (hash, key, sum) = match link { - Link::Reference { hash, key, sum, .. } => { - (hash.to_owned(), key.to_owned(), sum.to_owned()) - } + let (hash, key, aggregate_data) = match link { + Link::Reference { + hash, + key, + aggregate_data, + .. + } => (hash.to_owned(), key.to_owned(), aggregate_data.to_owned()), Link::Modified { tree, .. 
} => ( tree.hash().unwrap(), tree.key().to_vec(), - tree.sum().unwrap(), + tree.aggregate_data().unwrap(), ), Link::Loaded { hash, child_heights: _, - sum, + aggregate_data, tree, - } => (hash.to_owned(), tree.key().to_vec(), sum.to_owned()), + } => ( + hash.to_owned(), + tree.key().to_vec(), + aggregate_data.to_owned(), + ), _ => todo!(), }; @@ -711,7 +752,7 @@ where } // Need to skip this when restoring a sum tree - if !skip_sum_checks && node.sum().unwrap() != sum { + if !skip_sum_checks && node.aggregate_data().unwrap() != aggregate_data { bad_link_map.insert(instruction_id.to_vec(), hash); parent_keys.insert(instruction_id.to_vec(), parent_key.to_vec()); return; @@ -762,10 +803,9 @@ mod test { use super::{Merk, RefWalker}; use crate::{ - merk::source::MerkSource, test_utils::*, tree::kv::ValueDefinedCostType, Op, - TreeFeatureType::BasicMerkNode, + merk::source::MerkSource, test_utils::*, tree::kv::ValueDefinedCostType, + tree_type::TreeType, Op, TreeFeatureType::BasicMerkNode, }; - // TODO: Close and then reopen test fn assert_invariants(merk: &TempMerk) { @@ -991,7 +1031,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1017,7 +1057,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1073,7 +1113,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1092,7 +1132,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1113,7 +1153,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + 
TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1153,7 +1193,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1174,7 +1214,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1189,7 +1229,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1213,7 +1253,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1277,7 +1317,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs index c8646afa..a3d4c16e 100644 --- a/merk/src/merk/open.rs +++ b/merk/src/merk/open.rs @@ -6,6 +6,7 @@ use grovedb_version::version::GroveVersion; use crate::{ tree::kv::ValueDefinedCostType, + tree_type::TreeType, Error, Merk, MerkType, MerkType::{BaseMerk, LayeredMerk, StandaloneMerk}, }; @@ -15,20 +16,20 @@ where S: StorageContext<'db>, { /// Open empty tree - pub fn open_empty(storage: S, merk_type: MerkType, is_sum_tree: bool) -> Self { + pub fn open_empty(storage: S, merk_type: MerkType, tree_type: TreeType) -> Self { Self { tree: Cell::new(None), root_tree_key: Cell::new(None), storage, merk_type, - is_sum_tree, + tree_type, } } /// Open standalone tree pub fn open_standalone( storage: S, - is_sum_tree: bool, + tree_type: TreeType, value_defined_cost_fn: Option< impl Fn(&[u8], &GroveVersion) -> Option, >, @@ -39,7 +40,7 @@ where 
root_tree_key: Cell::new(None), storage, merk_type: StandaloneMerk, - is_sum_tree, + tree_type, }; merk.load_base_root(value_defined_cost_fn, grove_version) @@ -49,7 +50,7 @@ where /// Open base tree pub fn open_base( storage: S, - is_sum_tree: bool, + tree_type: TreeType, value_defined_cost_fn: Option< impl Fn(&[u8], &GroveVersion) -> Option, >, @@ -60,7 +61,7 @@ where root_tree_key: Cell::new(None), storage, merk_type: BaseMerk, - is_sum_tree, + tree_type, }; merk.load_base_root(value_defined_cost_fn, grove_version) @@ -71,7 +72,7 @@ where pub fn open_layered_with_root_key( storage: S, root_key: Option>, - is_sum_tree: bool, + tree_type: TreeType, value_defined_cost_fn: Option< impl Fn(&[u8], &GroveVersion) -> Option, >, @@ -82,7 +83,7 @@ where root_tree_key: Cell::new(root_key), storage, merk_type: LayeredMerk, - is_sum_tree, + tree_type, }; merk.load_root(value_defined_cost_fn, grove_version) @@ -101,7 +102,10 @@ mod test { use grovedb_version::version::GroveVersion; use tempfile::TempDir; - use crate::{tree::kv::ValueDefinedCostType, Merk, Op, TreeFeatureType::BasicMerkNode}; + use crate::{ + tree::kv::ValueDefinedCostType, tree_type::TreeType, Merk, Op, + TreeFeatureType::BasicMerkNode, + }; #[test] fn test_reopen_root_hash() { @@ -116,7 +120,7 @@ mod test { storage .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -143,7 +147,7 @@ mod test { storage .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -162,7 +166,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), Some(&batch)) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ); @@ -192,7 +196,7 @@ mod test { storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, 
+ TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ); diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 1082e80b..0c1784fd 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -48,6 +48,7 @@ use crate::{ Node, Op, }, tree::{combine_hash, kv::ValueDefinedCostType, RefWalker, TreeNode}, + tree_type::TreeType, CryptoHash, Error, Error::{CostsError, StorageError}, Link, Merk, @@ -315,11 +316,16 @@ impl<'db, S: StorageContext<'db>> Restorer { .expect("rewrite is only called when traversal_instruction is not empty"); let updated_key = chunk_tree.key(); - let updated_sum = chunk_tree.sum(); + let updated_sum = chunk_tree.aggregate_data(); - if let Some(Link::Reference { key, sum, .. }) = parent.link_mut(*is_left) { + if let Some(Link::Reference { + key, + aggregate_data, + .. + }) = parent.link_mut(*is_left) + { *key = updated_key.to_vec(); - *sum = updated_sum; + *aggregate_data = updated_sum; } let parent_bytes = parent.encode(); @@ -449,7 +455,7 @@ impl<'db, S: StorageContext<'db>> Restorer { if !self .merk - .verify(self.merk.is_sum_tree, grove_version) + .verify(self.merk.tree_type == TreeType::NormalTree, grove_version) .0 .is_empty() { @@ -561,6 +567,7 @@ mod tests { chunk::tests::traverse_get_node_hash, error::ChunkError::InvalidChunkProof, }, test_utils::{make_batch_seq, TempMerk}, + tree_type::TreeType, Error::ChunkRestoringError, Merk, PanicSource, }; @@ -682,7 +689,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -932,7 +939,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -951,7 +958,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + 
TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1024,7 +1031,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1093,7 +1100,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1175,7 +1182,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1277,7 +1284,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1359,7 +1366,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) @@ -1409,7 +1416,7 @@ mod tests { storage .get_immediate_storage_context(SubtreePath::empty(), &tx) .unwrap(), - false, + TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, ) diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs index dd71e74e..7c7568be 100644 --- a/merk/src/merk/source.rs +++ b/merk/src/merk/source.rs @@ -4,6 +4,7 @@ use grovedb_version::version::GroveVersion; use crate::{ tree::{kv::ValueDefinedCostType, Fetch, TreeNode}, + tree_type::TreeType, Error, Link, Merk, }; @@ -14,7 +15,7 @@ where pub(in crate::merk) fn source(&self) -> MerkSource { MerkSource { storage: &self.storage, - is_sum_tree: self.is_sum_tree, + tree_type: self.tree_type, } } } @@ -22,14 +23,14 @@ where #[derive(Debug)] pub struct MerkSource<'s, S> { storage: &'s S, - is_sum_tree: bool, + tree_type: TreeType, } impl<'s, S> Clone for MerkSource<'s, 
S> { fn clone(&self) -> Self { MerkSource { storage: self.storage, - is_sum_tree: self.is_sum_tree, + tree_type: self.tree_type, } } } diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index 4b2037fe..dafd09aa 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -18,8 +18,8 @@ use crate::{error::Error, tree::CryptoHash}; #[cfg(feature = "minimal")] use crate::{ proofs::chunk::chunk::{LEFT, RIGHT}, + tree::AggregateData, Link, - TreeFeatureType::SummedMerkNode, }; #[cfg(any(feature = "minimal", feature = "verify"))] @@ -36,24 +36,22 @@ pub struct Child { impl Child { #[cfg(feature = "minimal")] pub fn as_link(&self) -> Link { - let (key, sum) = match &self.tree.node { - Node::KV(key, _) | Node::KVValueHash(key, ..) => (key.as_slice(), None), + let (key, aggregate_data) = match &self.tree.node { + Node::KV(key, _) | Node::KVValueHash(key, ..) => { + (key.as_slice(), AggregateData::NoAggregateData) + } Node::KVValueHashFeatureType(key, _, _, feature_type) => { - let sum_value = match feature_type { - SummedMerkNode(sum) => Some(*sum), - _ => None, - }; - (key.as_slice(), sum_value) + (key.as_slice(), (*feature_type).into()) } // for the connection between the trunk and leaf chunks, we don't // have the child key so we must first write in an empty one. 
once // the leaf gets verified, we can write in this key to its parent - _ => (&[] as &[u8], None), + _ => (&[] as &[u8], AggregateData::NoAggregateData), }; Link::Reference { hash: self.hash, - sum, + aggregate_data, child_heights: ( self.tree.child_heights.0 as u8, self.tree.child_heights.1 as u8, @@ -294,12 +292,9 @@ impl Tree { } #[cfg(feature = "minimal")] - pub(crate) fn sum(&self) -> Option { + pub(crate) fn aggregate_data(&self) -> AggregateData { match self.node { - Node::KVValueHashFeatureType(.., feature_type) => match feature_type { - SummedMerkNode(sum) => Some(sum), - _ => None, - }, + Node::KVValueHashFeatureType(.., feature_type) => feature_type.into(), _ => panic!("Expected node to be type KVValueHashFeatureType"), } } @@ -527,6 +522,7 @@ where #[cfg(test)] mod test { use super::{super::*, Tree as ProofTree, *}; + use crate::TreeFeatureType::SummedMerkNode; fn make_7_node_prooftree() -> ProofTree { let make_node = |i| -> super::super::tree::Tree { Node::KV(vec![i], vec![]).into() }; @@ -639,7 +635,7 @@ mod test { left_link, Link::Reference { hash: tree.left.as_ref().map(|node| node.hash).unwrap(), - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (0, 0), key: vec![1] } @@ -649,7 +645,7 @@ mod test { right_link, Link::Reference { hash: tree.right.as_ref().map(|node| node.hash).unwrap(), - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (0, 0), key: vec![3] } @@ -688,7 +684,7 @@ mod test { left_link, Link::Reference { hash: tree.left.as_ref().map(|node| node.hash).unwrap(), - sum: Some(3), + aggregate_data: AggregateData::Sum(3), child_heights: (0, 0), key: vec![1] } @@ -698,7 +694,7 @@ mod test { right_link, Link::Reference { hash: tree.right.as_ref().map(|node| node.hash).unwrap(), - sum: Some(1), + aggregate_data: AggregateData::Sum(1), child_heights: (0, 0), key: vec![3] } diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 45beda4f..76eec948 100644 --- 
a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -44,6 +44,7 @@ use crate::{ kv::{ValueDefinedCostType, KV}, BatchEntry, MerkBatch, NoopCommit, Op, PanicSource, TreeNode, Walker, }, + tree_type::TreeType, Merk, TreeFeatureType::{BasicMerkNode, SummedMerkNode}, }; @@ -80,7 +81,7 @@ pub fn apply_memonly_unchecked( batch: &MerkBatch>, grove_version: &GroveVersion, ) -> TreeNode { - let is_sum_node = tree.is_sum_node(); + let node_type = tree.node_type(); let walker = Walker::::new(tree, PanicSource {}); let mut tree = Walker::::apply_to( Some(walker), @@ -90,7 +91,7 @@ pub fn apply_memonly_unchecked( Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( key.len() as u32, value.len() as u32, - is_sum_node, + node_type, )) }, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -108,12 +109,12 @@ pub fn apply_memonly_unchecked( .expect("apply failed") .0 .expect("expected tree"); - let is_sum_node = tree.is_sum_node(); + let node_type = tree.node_type(); tree.commit(&mut NoopCommit {}, &|key, value| { Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( key.len() as u32, value.len() as u32, - is_sum_node, + node_type, )) }) .unwrap() @@ -138,7 +139,7 @@ pub fn apply_memonly( pub fn apply_to_memonly( maybe_tree: Option, batch: &MerkBatch>, - is_sum_tree: bool, + tree_type: TreeType, grove_version: &GroveVersion, ) -> Option { let maybe_walker = maybe_tree.map(|tree| Walker::::new(tree, PanicSource {})); @@ -150,7 +151,7 @@ pub fn apply_to_memonly( Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( key.len() as u32, value.len() as u32, - is_sum_tree, + tree_type.inner_node_type(), )) }, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -168,12 +169,12 @@ pub fn apply_to_memonly( .expect("apply failed") .0 .map(|mut tree| { - let is_sum_node = tree.is_sum_node(); + let node_type = tree.node_type(); tree.commit(&mut NoopCommit {}, &|key, value| { Ok(KV::layered_value_byte_cost_size_for_key_and_value_lengths( key.len() as u32, 
value.len() as u32, - is_sum_node, + node_type, )) }) .unwrap() @@ -320,7 +321,7 @@ where storage .get_storage_context(SubtreePath::empty(), Some(batch)) .unwrap(), - false, + TreeType::NormalTree, None:: Option>, grove_version, ) @@ -340,7 +341,7 @@ where storage .get_storage_context(SubtreePath::empty(), None) .unwrap(), - false, + TreeType::NormalTree, None:: Option>, grove_version, ) diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 9a059712..a9b3b26e 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -40,9 +40,9 @@ use grovedb_storage::{ }; use grovedb_version::version::GroveVersion; -use crate::tree::kv::ValueDefinedCostType; #[cfg(feature = "minimal")] use crate::Merk; +use crate::{tree::kv::ValueDefinedCostType, tree_type::TreeType}; #[cfg(feature = "minimal")] /// Wraps a Merk instance and deletes it from disk it once it goes out of scope. @@ -66,7 +66,7 @@ impl TempMerk { let merk = Merk::open_base( context, - false, + TreeType::NormalTree, None:: Option>, grove_version, ) @@ -93,7 +93,7 @@ impl TempMerk { .unwrap(); self.merk = Merk::open_base( context, - false, + TreeType::NormalTree, None:: Option>, grove_version, ) diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 1e1a3bea..2ef07cf5 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -147,7 +147,10 @@ impl TreeNode { #[cfg(test)] mod tests { use super::{super::Link, *}; - use crate::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; + use crate::{ + tree::AggregateData, + TreeFeatureType::{BasicMerkNode, SummedMerkNode}, + }; #[test] fn encode_leaf_tree() { @@ -196,7 +199,7 @@ mod tests { [55; 32], Some(Link::Loaded { hash: [66; 32], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (123, 124), tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), @@ -225,7 +228,7 @@ mod tests { [55; 32], Some(Link::Uncommitted { hash: [66; 32], - sum: 
Some(10), + aggregate_data: AggregateData::Sum(10), child_heights: (123, 124), tree: TreeNode::new(vec![2], vec![3], None, BasicMerkNode).unwrap(), }), @@ -254,7 +257,7 @@ mod tests { [55; 32], Some(Link::Reference { hash: [66; 32], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (123, 124), key: vec![2], }), @@ -328,7 +331,7 @@ mod tests { key, child_heights, hash, - sum: _, + aggregate_data: _, }) = tree.link(true) { assert_eq!(*key, [2]); diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index 7de707db..8ad5349b 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -12,7 +12,11 @@ use integer_encoding::VarInt; #[cfg(feature = "minimal")] use super::hash::{CryptoHash, HASH_LENGTH, NULL_HASH}; -use crate::tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}; +#[cfg(feature = "minimal")] +use crate::{ + merk::NodeType, + tree::kv::ValueDefinedCostType::{LayeredValueDefinedCost, SpecializedValueDefinedCost}, +}; #[cfg(feature = "minimal")] use crate::{ tree::{ @@ -21,7 +25,6 @@ use crate::{ }, Link, HASH_LENGTH_U32, HASH_LENGTH_U32_X2, }; - // TODO: maybe use something similar to Vec but without capacity field, // (should save 16 bytes per entry). also, maybe a shorter length // field to save even more. also might be possible to combine key @@ -275,16 +278,16 @@ impl KV { pub fn node_value_byte_cost_size( not_prefixed_key_len: u32, raw_value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { // Sum trees are either 1 or 9 bytes. While they might be more or less on disk, // costs can not take advantage of the varint aspect of the feature. 
- let feature_len = if is_sum_node { 9 } else { 1 }; + let feature_len = node_type.feature_len(); let value_size = raw_value_len + HASH_LENGTH_U32_X2 + feature_len; // The node will be a child of another node which stores it's key and hash // That will be added during propagation - let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, is_sum_node); + let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, node_type); value_size + value_size.required_space() as u32 + parent_to_child_cost } @@ -294,10 +297,10 @@ impl KV { pub fn node_byte_cost_size_for_key_and_raw_value_lengths( not_prefixed_key_len: u32, raw_value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { let node_value_size = - Self::node_value_byte_cost_size(not_prefixed_key_len, raw_value_len, is_sum_node); + Self::node_value_byte_cost_size(not_prefixed_key_len, raw_value_len, node_type); let node_key_size = Self::node_key_byte_cost_size(not_prefixed_key_len); // Each node stores the key and value, the value hash and node hash node_value_size + node_key_size @@ -308,11 +311,11 @@ impl KV { pub fn layered_node_byte_cost_size_for_key_and_value_lengths( not_prefixed_key_len: u32, value_len: u32, - is_sum_node: bool, // this means the node is contained in a sumtree + node_type: NodeType, ) -> u32 { // Sum trees are either 1 or 9 bytes. While they might be more or less on disk, // costs can not take advantage of the varint aspect of the feature. 
- let feature_len = if is_sum_node { 9 } else { 1 }; + let feature_len = node_type.feature_len(); // Each node stores the key and value, and the node hash // the value hash on a layered node is not stored directly in the node @@ -326,7 +329,7 @@ impl KV { let node_size = node_value_size + node_key_size; // The node will be a child of another node which stores it's key and hash // That will be added during propagation - let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, is_sum_node); + let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, node_type); node_size + parent_to_child_cost } @@ -336,11 +339,12 @@ impl KV { pub fn layered_value_byte_cost_size_for_key_and_value_lengths( not_prefixed_key_len: u32, value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { - // Sum trees are either 1 or 9 bytes. While they might be more or less on disk, + // Sum trees are either 1 or 9 bytes, or 16 bytes for the big sum trees. + // While they might be more or less on disk, // costs can not take advantage of the varint aspect of the feature. - let feature_len = if is_sum_node { 9 } else { 1 }; + let feature_len = node_type.feature_len(); // Each node stores the key and value, and the node hash // the value hash on a layered node is not stored directly in the node // The required space is set to 2. However in reality it could be 1 or 2. 
@@ -352,7 +356,7 @@ impl KV { let node_value_size = value_len + feature_len + HASH_LENGTH_U32 + 2; // The node will be a child of another node which stores it's key and hash // That will be added during propagation - let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, is_sum_node); + let parent_to_child_cost = Link::encoded_link_size(not_prefixed_key_len, node_type); node_value_size + parent_to_child_cost } @@ -362,7 +366,7 @@ impl KV { pub fn value_byte_cost_size_for_key_and_value_lengths( not_prefixed_key_len: u32, value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { // encoding a reference encodes the key last and doesn't encode the size of the // key. so no need for a varint required space calculation for the @@ -371,7 +375,7 @@ impl KV { // however we do need the varint required space for the cost of the key in // rocks_db let parent_to_child_reference_len = - Link::encoded_link_size(not_prefixed_key_len, is_sum_node); + Link::encoded_link_size(not_prefixed_key_len, node_type); value_len + value_len.required_space() as u32 + parent_to_child_reference_len } @@ -381,14 +385,14 @@ impl KV { pub(crate) fn value_byte_cost_size_for_key_and_raw_value_lengths( not_prefixed_key_len: u32, raw_value_len: u32, - is_sum_node: bool, + node_type: NodeType, ) -> u32 { - let sum_tree_len = if is_sum_node { 9 } else { 1 }; // 1 for option, 0 or 9 for sum feature + let sum_tree_len = node_type.feature_len(); // 1 for option, 0 or 9 for sum feature let value_len = raw_value_len + HASH_LENGTH_U32_X2 + sum_tree_len; Self::value_byte_cost_size_for_key_and_value_lengths( not_prefixed_key_len, value_len, - is_sum_node, + node_type, ) } @@ -400,7 +404,7 @@ impl KV { Self::value_byte_cost_size_for_key_and_value_lengths( key_len, value_len, - self.feature_type.is_sum_feature(), + self.feature_type.node_type(), ) } @@ -415,13 +419,9 @@ impl KV { #[inline] pub(crate) fn layered_value_byte_cost_size(&self, value_cost: u32) -> u32 { let key_len = 
self.key.len() as u32; - let is_sum_node = self.feature_type.is_sum_feature(); + let node_type = self.feature_type.node_type(); - Self::layered_value_byte_cost_size_for_key_and_value_lengths( - key_len, - value_cost, - is_sum_node, - ) + Self::layered_value_byte_cost_size_for_key_and_value_lengths(key_len, value_cost, node_type) } /// This function is used to calculate the cost of groveDB sum item nodes @@ -431,9 +431,9 @@ impl KV { #[inline] pub(crate) fn specialized_value_byte_cost_size(&self, value_cost: u32) -> u32 { let key_len = self.key.len() as u32; - let is_sum_node = self.feature_type.is_sum_feature(); + let node_type = self.feature_type.node_type(); - Self::node_value_byte_cost_size(key_len, value_cost, is_sum_node) + Self::node_value_byte_cost_size(key_len, value_cost, node_type) } /// Costs based on predefined types (Trees, SumTrees, SumItems) that behave diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index 6c372d87..5e45b6fd 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -3,6 +3,8 @@ #[cfg(feature = "minimal")] use std::io::{Read, Write}; +#[cfg(feature = "minimal")] +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; #[cfg(feature = "minimal")] use ed::{Decode, Encode, Result, Terminated}; #[cfg(feature = "minimal")] @@ -11,8 +13,11 @@ use integer_encoding::{VarInt, VarIntReader, VarIntWriter}; #[cfg(feature = "minimal")] use super::{hash::CryptoHash, TreeNode}; #[cfg(feature = "minimal")] +use crate::merk::NodeType; +#[cfg(feature = "minimal")] +use crate::tree::tree_feature_type::AggregateData; +#[cfg(feature = "minimal")] use crate::HASH_LENGTH_U32; - // TODO: optimize memory footprint #[cfg(feature = "minimal")] @@ -30,8 +35,8 @@ pub enum Link { child_heights: (u8, u8), /// Key key: Vec, - /// Sum - sum: Option, + /// Aggregate data like Sum + aggregate_data: AggregateData, }, /// Represents a tree node which has been modified since the `Tree`'s last @@ -57,8 +62,8 @@ pub enum Link { child_heights: (u8, u8), 
/// Tree tree: TreeNode, - /// Sum - sum: Option, + /// Aggregate data like Sum + aggregate_data: AggregateData, }, /// Represents a tree node which has not been modified, has an up-to-date @@ -70,8 +75,8 @@ pub enum Link { child_heights: (u8, u8), /// Tree tree: TreeNode, - /// Sum - sum: Option, + /// Aggregate data like Sum + aggregate_data: AggregateData, }, } @@ -160,12 +165,12 @@ impl Link { /// of variant `Link::Modified` since we have not yet recomputed the tree's /// hash. #[inline] - pub const fn sum(&self) -> Option { + pub const fn aggregate_data(&self) -> AggregateData { match self { Link::Modified { .. } => panic!("Cannot get hash from modified link"), - Link::Reference { sum, .. } => *sum, - Link::Uncommitted { sum, .. } => *sum, - Link::Loaded { sum, .. } => *sum, + Link::Reference { aggregate_data, .. } => *aggregate_data, + Link::Uncommitted { aggregate_data, .. } => *aggregate_data, + Link::Loaded { aggregate_data, .. } => *aggregate_data, } } @@ -213,12 +218,12 @@ impl Link { Link::Uncommitted { .. } => panic!("Cannot prune Uncommitted tree"), Link::Loaded { hash, - sum, + aggregate_data, child_heights, tree, } => Self::Reference { hash, - sum, + aggregate_data, child_heights, key: tree.take_key(), }, @@ -251,8 +256,8 @@ impl Link { // Costs for operations within a single merk #[inline] /// Encoded link size - pub const fn encoded_link_size(not_prefixed_key_len: u32, is_sum_tree: bool) -> u32 { - let sum_tree_cost = if is_sum_tree { 8 } else { 0 }; + pub const fn encoded_link_size(not_prefixed_key_len: u32, node_type: NodeType) -> u32 { + let sum_tree_cost = node_type.cost(); // Links are optional values that represent the right or left node for a given // 1 byte to represent key_length (this is a u8) // key_length to represent the actual key @@ -269,9 +274,13 @@ impl Link { debug_assert!(self.key().len() < 256, "Key length must be less than 256"); Ok(match self { - Link::Reference { key, sum, .. 
} => match sum { - None => key.len() + 36, // 1 + HASH_LENGTH + 2 + 1, - Some(_sum_value) => { + Link::Reference { + key, + aggregate_data, + .. + } => match aggregate_data { + AggregateData::NoAggregateData => key.len() + 36, // 1 + HASH_LENGTH + 2 + 1, + AggregateData::Count(_) | AggregateData::Sum(_) => { // 1 for key len // key_len for keys // 32 for hash @@ -282,14 +291,36 @@ impl Link { // sum_len for sum vale key.len() + 44 // 1 + 32 + 2 + 1 + 8 } + AggregateData::BigSum(_) | AggregateData::CountAndSum(..) => { + // 1 for key len + // key_len for keys + // 32 for hash + // 2 for child heights + // 1 to represent presence of sum value + // if above is 1, then + // 1 for sum len + // sum_len for sum vale + key.len() + 52 // 1 + 32 + 2 + 1 + 16 + } }, Link::Modified { .. } => panic!("No encoding for Link::Modified"), - Link::Uncommitted { tree, sum, .. } | Link::Loaded { tree, sum, .. } => match sum { - None => tree.key().len() + 36, // 1 + 32 + 2 + 1, - Some(sum_value) => { - let _encoded_sum_value = sum_value.encode_var_vec(); + Link::Uncommitted { + tree, + aggregate_data, + .. + } + | Link::Loaded { + tree, + aggregate_data, + .. + } => match aggregate_data { + AggregateData::NoAggregateData => tree.key().len() + 36, // 1 + 32 + 2 + 1, + AggregateData::Count(_) | AggregateData::Sum(_) => { tree.key().len() + 44 // 1 + 32 + 2 + 1 + 8 } + AggregateData::BigSum(_) | AggregateData::CountAndSum(..) 
=> { + tree.key().len() + 52 // 1 + 32 + 2 + 1 + 16 + } }, }) } @@ -299,25 +330,25 @@ impl Link { impl Encode for Link { #[inline] fn encode_into(&self, out: &mut W) -> Result<()> { - let (hash, sum, key, (left_height, right_height)) = match self { + let (hash, aggregate_data, key, (left_height, right_height)) = match self { Link::Reference { hash, - sum, + aggregate_data, key, child_heights, - } => (hash, sum, key.as_slice(), child_heights), + } => (hash, aggregate_data, key.as_slice(), child_heights), Link::Loaded { hash, - sum, + aggregate_data, tree, child_heights, - } => (hash, sum, tree.key(), child_heights), + } => (hash, aggregate_data, tree.key(), child_heights), Link::Uncommitted { hash, - sum, + aggregate_data, tree, child_heights, - } => (hash, sum, tree.key(), child_heights), + } => (hash, aggregate_data, tree.key(), child_heights), Link::Modified { .. } => panic!("No encoding for Link::Modified"), }; @@ -331,13 +362,26 @@ impl Encode for Link { out.write_all(&[*left_height, *right_height])?; - match sum { - None => { + match aggregate_data { + AggregateData::NoAggregateData => { out.write_all(&[0])?; } - Some(sum_value) => { + AggregateData::Sum(sum_value) => { out.write_all(&[1])?; - out.write_varint(sum_value.to_owned())?; + out.write_varint(*sum_value)?; + } + AggregateData::BigSum(big_sum_value) => { + out.write_all(&[2])?; + out.write_i128::(*big_sum_value)?; + } + AggregateData::Count(count_value) => { + out.write_all(&[3])?; + out.write_varint(*count_value)?; + } + AggregateData::CountAndSum(count_value, sum_value) => { + out.write_all(&[4])?; + out.write_varint(*count_value)?; + out.write_varint(*sum_value)?; } } @@ -349,9 +393,13 @@ impl Encode for Link { debug_assert!(self.key().len() < 256, "Key length must be less than 256"); Ok(match self { - Link::Reference { key, sum, .. } => match sum { - None => key.len() + 36, // 1 + 32 + 2 + 1 - Some(sum_value) => { + Link::Reference { + key, + aggregate_data, + .. 
+ } => match aggregate_data { + AggregateData::NoAggregateData => key.len() + 36, // 1 + 32 + 2 + 1 + AggregateData::Sum(sum_value) => { let encoded_sum_value = sum_value.encode_var_vec(); // 1 for key len // key_len for keys @@ -363,14 +411,63 @@ impl Encode for Link { // sum_len for sum vale key.len() + encoded_sum_value.len() + 36 // 1 + 32 + 2 + 1 } + AggregateData::BigSum(_) => { + // 1 for key len + // key_len for keys + // 32 for hash + // 2 for child heights + // 1 to represent presence of sum value + // if above is 1, then + // 1 for sum len + // sum_len for sum vale + key.len() + 52 // 1 + 32 + 2 + 1 + 16 + } + AggregateData::Count(count) => { + let encoded_count_value = count.encode_var_vec(); + // 1 for key len + // key_len for keys + // 32 for hash + // 2 for child heights + // 1 to represent presence of sum value + // if above is 1, then + // 1 for sum len + // sum_len for sum vale + key.len() + encoded_count_value.len() + 36 // 1 + 32 + 2 + 1 + } + AggregateData::CountAndSum(count, sum) => { + let encoded_sum_value = sum.encode_var_vec(); + let encoded_count_value = count.encode_var_vec(); + key.len() + encoded_sum_value.len() + encoded_count_value.len() + 36 + } }, Link::Modified { .. } => panic!("No encoding for Link::Modified"), - Link::Uncommitted { tree, sum, .. } | Link::Loaded { tree, sum, .. } => match sum { - None => tree.key().len() + 36, // 1 + 32 + 2 + 1 - Some(sum_value) => { + Link::Uncommitted { + tree, + aggregate_data, + .. + } + | Link::Loaded { + tree, + aggregate_data, + .. 
+ } => match aggregate_data { + AggregateData::NoAggregateData => tree.key().len() + 36, // 1 + 32 + 2 + 1 + AggregateData::Sum(sum_value) => { let encoded_sum_value = sum_value.encode_var_vec(); tree.key().len() + encoded_sum_value.len() + 36 // 1 + 32 + 2 + 1 } + AggregateData::BigSum(_) => { + tree.key().len() + 52 // 1 + 32 + 2 + 1 + 16 + } + AggregateData::Count(count_value) => { + let encoded_count_value = count_value.encode_var_vec(); + tree.key().len() + encoded_count_value.len() + 36 // 1 + 32 + 2 + 1 + } + AggregateData::CountAndSum(count, sum) => { + let encoded_sum_value = sum.encode_var_vec(); + let encoded_count_value = count.encode_var_vec(); + tree.key().len() + encoded_sum_value.len() + encoded_count_value.len() + 36 + } }, }) } @@ -383,7 +480,7 @@ impl Link { Self::Reference { key: Vec::with_capacity(64), hash: Default::default(), - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (0, 0), } } @@ -407,7 +504,7 @@ impl Decode for Link { } if let Link::Reference { - ref mut sum, + ref mut aggregate_data, ref mut key, ref mut hash, ref mut child_heights, @@ -423,14 +520,27 @@ impl Decode for Link { child_heights.0 = read_u8(&mut input)?; child_heights.1 = read_u8(&mut input)?; - let has_sum = read_u8(&mut input)?; - *sum = match has_sum { - 0 => None, + let aggregate_data_byte = read_u8(&mut input)?; + *aggregate_data = match aggregate_data_byte { + 0 => AggregateData::NoAggregateData, 1 => { let encoded_sum: i64 = input.read_varint()?; - Some(encoded_sum) + AggregateData::Sum(encoded_sum) } - _ => return Err(ed::Error::UnexpectedByte(55)), + 2 => { + let encoded_big_sum: i128 = input.read_i128::()?; + AggregateData::BigSum(encoded_big_sum) + } + 3 => { + let encoded_count: u64 = input.read_varint()?; + AggregateData::Count(encoded_count) + } + 4 => { + let encoded_count: u64 = input.read_varint()?; + let encoded_sum: i64 = input.read_varint()?; + AggregateData::CountAndSum(encoded_count, encoded_sum) + } + byte => return 
Err(ed::Error::UnexpectedByte(byte)), }; } else { unreachable!() @@ -487,7 +597,7 @@ mod test { #[test] fn types() { let hash = NULL_HASH; - let sum = None; + let aggregate_data = AggregateData::NoAggregateData; let child_heights = (0, 0); let pending_writes = 1; let key = vec![0]; @@ -495,7 +605,7 @@ mod test { let reference = Link::Reference { hash, - sum, + aggregate_data, child_heights, key, }; @@ -506,13 +616,13 @@ mod test { }; let uncommitted = Link::Uncommitted { hash, - sum, + aggregate_data, child_heights, tree: tree(), }; let loaded = Link::Loaded { hash, - sum, + aggregate_data, child_heights, tree: tree(), }; @@ -578,7 +688,7 @@ mod test { fn uncommitted_into_reference() { Link::Uncommitted { hash: [1; 32], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (1, 1), tree: TreeNode::new(vec![0], vec![1], None, BasicMerkNode).unwrap(), } @@ -589,7 +699,7 @@ mod test { fn encode_link() { let link = Link::Reference { key: vec![1, 2, 3], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (123, 124), hash: [55; 32], }; @@ -610,7 +720,7 @@ mod test { fn encode_link_with_sum() { let link = Link::Reference { key: vec![1, 2, 3], - sum: Some(50), + aggregate_data: AggregateData::Sum(50), child_heights: (123, 124), hash: [55; 32], }; @@ -629,12 +739,59 @@ mod test { ); } + #[test] + fn encode_link_with_count() { + let link = Link::Reference { + key: vec![1, 2, 3], + aggregate_data: AggregateData::Count(50), + child_heights: (123, 124), + hash: [55; 32], + }; + assert_eq!(link.encoding_length().unwrap(), 40); + + let mut bytes = vec![]; + link.encode_into(&mut bytes).unwrap(); + + assert_eq!(link.encoding_length().unwrap(), bytes.len()); + assert_eq!( + bytes, + vec![ + 3, 1, 2, 3, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, + 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 123, 124, 3, 50, + ] + ); + } + + #[test] + fn encode_link_with_big_sum() { + let link = Link::Reference 
{ + key: vec![1, 2, 3], + aggregate_data: AggregateData::BigSum(50), + child_heights: (123, 124), + hash: [55; 32], + }; + assert_eq!(link.encoding_length().unwrap(), 55); + + let mut bytes = vec![]; + link.encode_into(&mut bytes).unwrap(); + + assert_eq!(link.encoding_length().unwrap(), bytes.len()); + assert_eq!( + bytes, + vec![ + 3, 1, 2, 3, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, + 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 123, 124, 2, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50 + ] + ); + } + #[test] #[should_panic] fn encode_link_long_key() { let link = Link::Reference { key: vec![123; 300], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (123, 124), hash: [55; 32], }; @@ -649,6 +806,6 @@ mod test { 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 123, 124, 0, ]; let link = Link::decode(bytes.as_slice()).expect("expected to decode a link"); - assert_eq!(link.sum(), None); + assert_eq!(link.aggregate_data(), AggregateData::NoAggregateData); } } diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index e87865a2..460edbce 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -59,11 +59,15 @@ use kv::KV; pub use link::Link; #[cfg(feature = "minimal")] pub use ops::{AuxMerkBatch, BatchEntry, MerkBatch, Op, PanicSource}; +#[cfg(feature = "minimal")] +pub use tree_feature_type::AggregateData; #[cfg(any(feature = "minimal", feature = "verify"))] pub use tree_feature_type::TreeFeatureType; #[cfg(feature = "minimal")] pub use walk::{Fetch, RefWalker, Walker}; +#[cfg(feature = "minimal")] +use crate::merk::NodeType; #[cfg(feature = "minimal")] use crate::tree::hash::HASH_LENGTH_X2; #[cfg(feature = "minimal")] @@ -91,6 +95,11 @@ impl TreeNodeInner { self.kv.value } + /// Get the value as owned of the key value struct + pub fn value_as_owned_with_feature(self) -> (Vec, TreeFeatureType) { + (self.kv.value, self.kv.feature_type) + } + /// Get the value as slice of the key 
value struct pub fn value_as_slice(&self) -> &[u8] { self.kv.value.as_slice() @@ -155,9 +164,9 @@ impl TreeNode { } } - /// Is sum node? - pub fn is_sum_node(&self) -> bool { - self.inner.kv.feature_type.is_sum_feature() + /// the node type + pub fn node_type(&self) -> NodeType { + self.inner.kv.feature_type.node_type() } pub fn storage_cost_for_update(current_value_byte_cost: u32, old_cost: u32) -> StorageCost { @@ -250,7 +259,7 @@ impl TreeNode { KV::value_byte_cost_size_for_key_and_value_lengths( key_len, value_len as u32, - self.inner.kv.feature_type.is_sum_feature(), + self.inner.kv.feature_type.node_type(), ) } else { self.inner.kv.value_byte_cost_size() @@ -447,9 +456,15 @@ impl TreeNode { ( // 36 = 32 Hash + 1 key length + 2 child heights + 1 feature type link.key().len() as u32 + 36, - link.sum() - .map(|s| s.encode_var_vec().len() as u32) - .unwrap_or_default(), + match link.aggregate_data() { + AggregateData::NoAggregateData => 0, + AggregateData::Sum(s) => s.encode_var_vec().len() as u32, + AggregateData::BigSum(_) => 16 as u32, + AggregateData::Count(c) => c.encode_var_vec().len() as u32, + AggregateData::CountAndSum(c, s) => { + s.encode_var_vec().len() as u32 + c.encode_var_vec().len() as u32 + } + }, ) }) } @@ -490,9 +505,49 @@ impl TreeNode { /// Returns the sum of the root node's child on the given side, if any. If /// there is no child, returns 0. #[inline] - pub fn child_sum(&self, left: bool) -> i64 { + pub fn child_aggregate_sum_data_as_i64(&self, left: bool) -> Result { + match self.link(left) { + Some(link) => match link.aggregate_data() { + AggregateData::NoAggregateData => Ok(0), + AggregateData::Sum(s) => Ok(s), + AggregateData::BigSum(_) => Err(Error::BigSumTreeUnderNormalSumTree( + "for aggregate data as i64".to_string(), + )), + AggregateData::Count(_) => Ok(0), + AggregateData::CountAndSum(_, s) => Ok(s), + }, + _ => Ok(0), + } + } + + /// Returns the sum of the root node's child on the given side, if any. 
If + /// there is no child, returns 0. + #[inline] + pub fn child_aggregate_count_data_as_u64(&self, left: bool) -> Result { match self.link(left) { - Some(link) => link.sum().unwrap_or_default(), + Some(link) => match link.aggregate_data() { + AggregateData::NoAggregateData => Ok(0), + AggregateData::Sum(_) => Ok(0), + AggregateData::BigSum(_) => Ok(0), + AggregateData::Count(c) => Ok(c), + AggregateData::CountAndSum(c, _) => Ok(c), + }, + _ => Ok(0), + } + } + + /// Returns the sum of the root node's child on the given side, if any. If + /// there is no child, returns 0. + #[inline] + pub fn child_aggregate_sum_data_as_i128(&self, left: bool) -> i128 { + match self.link(left) { + Some(link) => match link.aggregate_data() { + AggregateData::NoAggregateData => 0, + AggregateData::Sum(s) => s as i128, + AggregateData::BigSum(s) => s, + AggregateData::Count(_) => 0, + AggregateData::CountAndSum(_, s) => s as i128, + }, _ => 0, } } @@ -510,14 +565,52 @@ impl TreeNode { /// Computes and returns the hash of the root node. 
#[inline] - pub fn sum(&self) -> Result, Error> { + pub fn aggregate_data(&self) -> Result { match self.inner.kv.feature_type { - TreeFeatureType::BasicMerkNode => Ok(None), - TreeFeatureType::SummedMerkNode(value) => value - .checked_add(self.child_sum(true)) - .and_then(|a| a.checked_add(self.child_sum(false))) - .ok_or(Overflow("sum is overflowing")) - .map(Some), + TreeFeatureType::BasicMerkNode => Ok(AggregateData::NoAggregateData), + TreeFeatureType::SummedMerkNode(value) => { + let left = self.child_aggregate_sum_data_as_i64(true)?; + let right = self.child_aggregate_sum_data_as_i64(false)?; + value + .checked_add(left) + .and_then(|a| a.checked_add(right)) + .ok_or(Overflow("sum is overflowing")) + .map(AggregateData::Sum) + } + TreeFeatureType::BigSummedMerkNode(value) => value + .checked_add(self.child_aggregate_sum_data_as_i128(true)) + .and_then(|a| a.checked_add(self.child_aggregate_sum_data_as_i128(false))) + .ok_or(Overflow("big sum is overflowing")) + .map(AggregateData::BigSum), + TreeFeatureType::CountedMerkNode(value) => { + let left = self.child_aggregate_count_data_as_u64(true)?; + let right = self.child_aggregate_count_data_as_u64(false)?; + value + .checked_add(left) + .and_then(|a| a.checked_add(right)) + .ok_or(Overflow("count is overflowing")) + .map(AggregateData::Count) + } + TreeFeatureType::CountedSummedMerkNode(count_value, sum_value) => { + let left_count = self.child_aggregate_count_data_as_u64(true)?; + let right_count = self.child_aggregate_count_data_as_u64(false)?; + let left_sum = self.child_aggregate_sum_data_as_i64(true)?; + let right_sum = self.child_aggregate_sum_data_as_i64(false)?; + let aggregated_count_value = count_value + .checked_add(left_count) + .and_then(|a| a.checked_add(right_count)) + .ok_or(Overflow("count is overflowing"))?; + + let aggregated_sum_value = sum_value + .checked_add(left_sum) + .and_then(|a| a.checked_add(right_sum)) + .ok_or(Overflow("count is overflowing"))?; + + Ok(AggregateData::CountAndSum( 
+ aggregated_count_value, + aggregated_sum_value, + )) + } } } @@ -936,13 +1029,13 @@ impl TreeNode { { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); - let sum = cost_return_on_error_default!(tree.sum()); + let aggregate_data = cost_return_on_error_default!(tree.aggregate_data()); self.inner.left = Some(Link::Loaded { hash: tree.hash().unwrap_add_cost(&mut cost), tree, child_heights, - sum, + aggregate_data, }); } else { unreachable!() @@ -959,12 +1052,12 @@ impl TreeNode { { // println!("key is {}", std::str::from_utf8(tree.key()).unwrap()); cost_return_on_error!(&mut cost, tree.commit(c, old_specialized_cost,)); - let sum = cost_return_on_error_default!(tree.sum()); + let aggregate_data = cost_return_on_error_default!(tree.aggregate_data()); self.inner.right = Some(Link::Loaded { hash: tree.hash().unwrap_add_cost(&mut cost), tree, child_heights, - sum, + aggregate_data, }); } else { unreachable!() @@ -1001,13 +1094,13 @@ impl TreeNode { { // TODO: return Err instead of panic? let link = self.link(left).expect("Expected link"); - let (child_heights, hash, sum) = match link { + let (child_heights, hash, aggregate_data) = match link { Link::Reference { child_heights, hash, - sum, + aggregate_data, .. 
- } => (child_heights, hash, sum), + } => (child_heights, hash, aggregate_data), _ => panic!("Expected Some(Link::Reference)"), }; @@ -1021,7 +1114,7 @@ impl TreeNode { tree, hash: *hash, child_heights: *child_heights, - sum: *sum, + aggregate_data: *aggregate_data, }); Ok(()).wrap_with_cost(cost) } @@ -1041,7 +1134,7 @@ pub const fn side_to_str(left: bool) -> &'static str { #[cfg(test)] mod test { - use super::{commit::NoopCommit, hash::NULL_HASH, TreeNode}; + use super::{commit::NoopCommit, hash::NULL_HASH, AggregateData, TreeNode}; use crate::tree::{ tree_feature_type::TreeFeatureType::SummedMerkNode, TreeFeatureType::BasicMerkNode, }; @@ -1250,6 +1343,10 @@ mod test { .unwrap() .expect("commit failed"); - assert_eq!(Some(8), tree.sum().expect("expected to get sum from tree")); + assert_eq!( + AggregateData::Sum(8), + tree.aggregate_data() + .expect("expected to get sum from tree") + ); } } diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 66fcb716..2e2cf3fd 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -1100,7 +1100,7 @@ mod test { None, Some(Link::Loaded { hash: [123; 32], - sum: None, + aggregate_data: AggregateData::NoAggregateData, child_heights: (0, 0), tree: TreeNode::new(b"foo2".to_vec(), b"bar2".to_vec(), None, BasicMerkNode) .unwrap(), diff --git a/merk/src/tree/tree_feature_type.rs b/merk/src/tree/tree_feature_type.rs index bb815dca..579e5032 100644 --- a/merk/src/tree/tree_feature_type.rs +++ b/merk/src/tree/tree_feature_type.rs @@ -3,15 +3,25 @@ #[cfg(any(feature = "minimal", feature = "verify"))] use std::io::{Read, Write}; +#[cfg(any(feature = "minimal", feature = "verify"))] +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; #[cfg(feature = "minimal")] use ed::Terminated; #[cfg(any(feature = "minimal", feature = "verify"))] use ed::{Decode, Encode}; #[cfg(any(feature = "minimal", feature = "verify"))] +use grovedb_costs::TreeCostType; +#[cfg(any(feature = "minimal", feature = "verify"))] use 
integer_encoding::{VarInt, VarIntReader, VarIntWriter}; +#[cfg(feature = "minimal")] +use crate::merk::NodeType; #[cfg(any(feature = "minimal", feature = "verify"))] -use crate::tree::tree_feature_type::TreeFeatureType::{BasicMerkNode, SummedMerkNode}; +use crate::tree::tree_feature_type::TreeFeatureType::{ + BasicMerkNode, BigSummedMerkNode, CountedMerkNode, CountedSummedMerkNode, SummedMerkNode, +}; +#[cfg(feature = "minimal")] +use crate::tree_type::TreeType; #[cfg(any(feature = "minimal", feature = "verify"))] #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -21,23 +31,121 @@ pub enum TreeFeatureType { BasicMerkNode, /// Summed Merk Tree Node SummedMerkNode(i64), + /// Big Summed Merk Tree Node + BigSummedMerkNode(i128), + /// Counted Merk Tree Node + CountedMerkNode(u64), + /// Counted and summed Merk Tree Node + CountedSummedMerkNode(u64, i64), } #[cfg(feature = "minimal")] impl TreeFeatureType { - #[inline] - /// Get length of encoded SummedMerk - pub fn sum_length(&self) -> Option { + pub fn node_type(&self) -> NodeType { match self { - BasicMerkNode => None, - SummedMerkNode(m) => Some(m.encode_var_vec().len() as u32), + BasicMerkNode => NodeType::NormalNode, + SummedMerkNode(_) => NodeType::SumNode, + BigSummedMerkNode(_) => NodeType::BigSumNode, + CountedMerkNode(_) => NodeType::CountNode, + CountedSummedMerkNode(..) => NodeType::CountSumNode, + } + } +} + +#[cfg(feature = "minimal")] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum AggregateData { + NoAggregateData, + Sum(i64), + BigSum(i128), + Count(u64), + CountAndSum(u64, i64), +} + +#[cfg(feature = "minimal")] +impl AggregateData { + pub fn parent_tree_type(&self) -> TreeType { + match self { + AggregateData::NoAggregateData => TreeType::NormalTree, + AggregateData::Sum(_) => TreeType::SumTree, + AggregateData::BigSum(_) => TreeType::BigSumTree, + AggregateData::Count(_) => TreeType::CountTree, + AggregateData::CountAndSum(..) 
=> TreeType::CountSumTree, + } + } + + pub fn as_sum_i64(&self) -> i64 { + match self { + AggregateData::NoAggregateData => 0, + AggregateData::Sum(s) => *s, + AggregateData::BigSum(i) => { + let max = i64::MAX as i128; + if *i > max { + i64::MAX + } else { + *i as i64 + } + } + AggregateData::Count(_) => 0, + AggregateData::CountAndSum(_, s) => *s, + } + } + + pub fn as_count_u64(&self) -> u64 { + match self { + AggregateData::NoAggregateData => 0, + AggregateData::Sum(_) => 0, + AggregateData::BigSum(_) => 0, + AggregateData::Count(c) => *c, + AggregateData::CountAndSum(c, _) => *c, + } + } + + pub fn as_summed_i128(&self) -> i128 { + match self { + AggregateData::NoAggregateData => 0, + AggregateData::Sum(s) => *s as i128, + AggregateData::BigSum(i) => *i, + AggregateData::Count(_) => 0, + AggregateData::CountAndSum(_, s) => *s as i128, } } +} + +#[cfg(feature = "minimal")] +impl From for AggregateData { + fn from(value: TreeFeatureType) -> Self { + match value { + BasicMerkNode => AggregateData::NoAggregateData, + SummedMerkNode(val) => AggregateData::Sum(val), + BigSummedMerkNode(val) => AggregateData::BigSum(val), + CountedMerkNode(val) => AggregateData::Count(val), + CountedSummedMerkNode(count, sum) => AggregateData::CountAndSum(count, sum), + } + } +} +#[cfg(feature = "minimal")] +impl TreeFeatureType { #[inline] - /// Is sum feature? 
- pub fn is_sum_feature(&self) -> bool { - matches!(self, SummedMerkNode(_)) + /// Get the tree cost type and the encoded length of the aggregate value, + /// if this feature type carries one (returns `None` for `BasicMerkNode`) + pub fn tree_feature_specialized_type_and_length(&self) -> Option<(TreeCostType, u32)> { + match self { + BasicMerkNode => None, + SummedMerkNode(m) => Some(( + TreeCostType::TreeFeatureUsesVarIntCostAs8Bytes, + m.encode_var_vec().len() as u32, + )), + BigSummedMerkNode(_) => Some((TreeCostType::TreeFeatureUses16Bytes, 16)), + CountedMerkNode(m) => Some(( + TreeCostType::TreeFeatureUsesVarIntCostAs8Bytes, + m.encode_var_vec().len() as u32, + )), + CountedSummedMerkNode(count, sum) => Some(( + TreeCostType::TreeFeatureUsesTwoVarIntsCostAs16Bytes, + count.encode_var_vec().len() as u32 + sum.encode_var_vec().len() as u32, + )), + } } #[inline] @@ -46,6 +154,9 @@ impl TreeFeatureType { match self { BasicMerkNode => 1, SummedMerkNode(_sum) => 9, + BigSummedMerkNode(_) => 17, + CountedMerkNode(_) => 9, + CountedSummedMerkNode(..) => 17, } } } @@ -53,6 +164,7 @@ impl TreeFeatureType { #[cfg(feature = "minimal")] impl Terminated for TreeFeatureType {} +#[cfg(any(feature = "minimal", feature = "verify"))] impl Encode for TreeFeatureType { #[inline] fn encode_into(&self, dest: &mut W) -> ed::Result<()> { @@ -63,7 +175,23 @@ impl Encode for TreeFeatureType { } SummedMerkNode(sum) => { dest.write_all(&[1])?; - dest.write_varint(sum.to_owned())?; + dest.write_varint(*sum)?; + Ok(()) + } + BigSummedMerkNode(sum) => { + dest.write_all(&[2])?; + dest.write_i128::(*sum)?; + Ok(()) + } + CountedMerkNode(count) => { + dest.write_all(&[3])?; + dest.write_varint(*count)?; + Ok(()) + } + CountedSummedMerkNode(count, sum) => { + dest.write_all(&[4])?; + dest.write_varint(*count)?; + dest.write_varint(*sum)?; Ok(()) } } @@ -79,6 +207,18 @@ impl Encode for TreeFeatureType { // encoded_sum.len() for the length of the encoded vector Ok(1 + encoded_sum.len()) } + BigSummedMerkNode(_) => Ok(17), + CountedMerkNode(count) => { + let encoded_sum = count.encode_var_vec(); + // 1 for 
the enum type + // encoded_sum.len() for the length of the encoded vector + Ok(1 + encoded_sum.len()) + } + CountedSummedMerkNode(count, sum) => { + let encoded_lengths = count.encode_var_vec().len() + sum.encode_var_vec().len(); + // 1 for the enum type + Ok(1 + encoded_lengths) + } } } } @@ -95,6 +235,19 @@ impl Decode for TreeFeatureType { let encoded_sum: i64 = input.read_varint()?; Ok(SummedMerkNode(encoded_sum)) } + [2] => { + let encoded_sum: i128 = input.read_i128::()?; + Ok(BigSummedMerkNode(encoded_sum)) + } + [3] => { + let encoded_count: u64 = input.read_varint()?; + Ok(CountedMerkNode(encoded_count)) + } + [4] => { + let encoded_count: u64 = input.read_varint()?; + let encoded_sum: i64 = input.read_varint()?; + Ok(CountedSummedMerkNode(encoded_count, encoded_sum)) + } _ => Err(ed::Error::UnexpectedByte(55)), } } diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 3a1998c9..834643a1 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -403,7 +403,7 @@ mod test { use grovedb_version::version::GroveVersion; use super::{super::NoopCommit, *}; - use crate::tree::{TreeFeatureType::BasicMerkNode, TreeNode}; + use crate::tree::{AggregateData, TreeFeatureType::BasicMerkNode, TreeNode}; #[derive(Clone)] struct MockSource {} @@ -491,7 +491,7 @@ mod test { hash: Default::default(), key: b"foo".to_vec(), child_heights: (0, 0), - sum: None, + aggregate_data: AggregateData::NoAggregateData, }), None, BasicMerkNode, diff --git a/merk/src/tree_type.rs b/merk/src/tree_type.rs new file mode 100644 index 00000000..6432f232 --- /dev/null +++ b/merk/src/tree_type.rs @@ -0,0 +1,81 @@ +use std::fmt; + +#[cfg(feature = "minimal")] +use crate::merk::NodeType; +use crate::{Error, TreeFeatureType}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum MaybeTree { + Tree(TreeType), + NotTree, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum TreeType { + NormalTree = 0, + SumTree = 1, + BigSumTree = 2, + 
CountTree = 3, + CountSumTree = 4, +} + +impl TryFrom for TreeType { + type Error = Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(TreeType::NormalTree), + 1 => Ok(TreeType::SumTree), + 2 => Ok(TreeType::BigSumTree), + 3 => Ok(TreeType::CountTree), + 4 => Ok(TreeType::CountSumTree), + n => Err(Error::UnknownTreeType(format!("got {}, max is 4", n))), // Error handling + } + } +} + +impl fmt::Display for TreeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match *self { + TreeType::NormalTree => "Normal Tree", + TreeType::SumTree => "Sum Tree", + TreeType::BigSumTree => "Big Sum Tree", + TreeType::CountTree => "Count Tree", + TreeType::CountSumTree => "Count Sum Tree", + }; + write!(f, "{}", s) + } +} + +impl TreeType { + pub fn allows_sum_item(&self) -> bool { + match self { + TreeType::NormalTree => false, + TreeType::SumTree => true, + TreeType::BigSumTree => true, + TreeType::CountTree => false, + TreeType::CountSumTree => true, + } + } + + #[cfg(feature = "minimal")] + pub const fn inner_node_type(&self) -> NodeType { + match self { + TreeType::NormalTree => NodeType::NormalNode, + TreeType::SumTree => NodeType::SumNode, + TreeType::BigSumTree => NodeType::BigSumNode, + TreeType::CountTree => NodeType::CountNode, + TreeType::CountSumTree => NodeType::CountSumNode, + } + } + + pub fn empty_tree_feature_type(&self) -> TreeFeatureType { + match self { + TreeType::NormalTree => TreeFeatureType::BasicMerkNode, + TreeType::SumTree => TreeFeatureType::SummedMerkNode(0), + TreeType::BigSumTree => TreeFeatureType::BigSummedMerkNode(0), + TreeType::CountTree => TreeFeatureType::CountedMerkNode(0), + TreeType::CountSumTree => TreeFeatureType::CountedSummedMerkNode(0, 0), + } + } +} diff --git a/node-grove/Cargo.toml b/node-grove/Cargo.toml index bd91146b..3654bd5b 100644 --- a/node-grove/Cargo.toml +++ b/node-grove/Cargo.toml @@ -10,8 +10,8 @@ exclude = ["index.node"] crate-type = ["cdylib"] [dependencies] -grovedb = { 
version = "2.2.1", path = "../grovedb", features = ["full", "estimated_costs"] } -grovedb-version = { version = "2.2.1", path = "../grovedb-version" } +grovedb = { version = "3.0.0", path = "../grovedb", features = ["full", "estimated_costs"] } +grovedb-version = { version = "3.0.0", path = "../grovedb-version" } [dependencies.neon] version = "0.10.1" diff --git a/node-grove/src/converter.rs b/node-grove/src/converter.rs index a822faad..7ba3850c 100644 --- a/node-grove/src/converter.rs +++ b/node-grove/src/converter.rs @@ -38,6 +38,9 @@ fn element_to_string(element: Element) -> String { Element::Reference(..) => "reference".to_string(), Element::Tree(..) => "tree".to_string(), Element::SumTree(..) => "sum_tree".to_string(), + Element::BigSumTree(..) => "big_sum_tree".to_string(), + Element::CountTree(..) => "count_tree".to_string(), + Element::CountSumTree(..) => "count_sum_tree".to_string(), } } @@ -92,6 +95,9 @@ pub fn element_to_js_object<'a, C: Context<'a>>( Element::Reference(..) => nested_vecs_to_js(vec![], cx)?, Element::Tree(..) => nested_vecs_to_js(vec![], cx)?, Element::SumTree(..) => nested_vecs_to_js(vec![], cx)?, + Element::BigSumTree(..) => nested_vecs_to_js(vec![], cx)?, + Element::CountTree(..) => nested_vecs_to_js(vec![], cx)?, + Element::CountSumTree(..) => nested_vecs_to_js(vec![], cx)?, }; js_object.set(cx, "value", js_value)?; diff --git a/path/Cargo.toml b/path/Cargo.toml index f5f89be7..91738bfe 100644 --- a/path/Cargo.toml +++ b/path/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-path" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" description = "Path extension crate for GroveDB" @@ -9,3 +9,4 @@ documentation = "https://docs.rs/grovedb-path" repository = "https://github.com/dashpay/grovedb" [dependencies] +hex = "0.4.3" diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index 437f911a..ae8cd900 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -34,7 +34,10 @@ //! 
combined with it's various `From` implementations it can cover slices, owned //! subtree paths and other path references if use as generic [Into]. -use std::hash::{Hash, Hasher}; +use std::{ + fmt::{Display, Formatter}, + hash::{Hash, Hasher}, +}; use crate::{ subtree_path_builder::{SubtreePathBuilder, SubtreePathRelative}, @@ -48,6 +51,51 @@ pub struct SubtreePath<'b, B> { pub(crate) ref_variant: SubtreePathInner<'b, B>, } +fn hex_to_ascii(hex_value: &[u8]) -> String { + // Define the set of allowed characters + const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ + 0123456789_-/\\[]@"; + + // Check if all characters in hex_value are allowed + if hex_value.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { + // Try to convert to UTF-8 + String::from_utf8(hex_value.to_vec()) + .unwrap_or_else(|_| format!("0x{}", hex::encode(hex_value))) + } else { + // Hex encode and prepend "0x" + format!("0x{}", hex::encode(hex_value)) + } +} + +impl<'b, B: AsRef<[u8]>> Display for SubtreePath<'b, B> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match &self.ref_variant { + SubtreePathInner::Slice(slice) => { + let ascii_path = slice + .iter() + .map(|e| hex_to_ascii(e.as_ref())) + .collect::>() + .join("/"); + write!(f, "{}", ascii_path) + } + SubtreePathInner::SubtreePath(subtree_path) => { + let ascii_path = subtree_path + .to_vec() + .into_iter() + .map(|a| hex_to_ascii(a.as_slice())) + .collect::>() + .join("/"); + write!(f, "{}", ascii_path) + } + SubtreePathInner::SubtreePathIter(iter) => { + let ascii_path = iter.clone().map(hex_to_ascii).collect::>().join("/"); + write!(f, "{}", ascii_path) + } + } + } +} + /// Wrapped inner representation of subtree path ref. 
#[derive(Debug)] pub(crate) enum SubtreePathInner<'b, B> { diff --git a/path/src/util/compact_bytes.rs b/path/src/util/compact_bytes.rs index 1e4362cb..c44b6dd9 100644 --- a/path/src/util/compact_bytes.rs +++ b/path/src/util/compact_bytes.rs @@ -66,7 +66,7 @@ impl CompactBytes { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub(crate) struct CompactBytesIter<'a> { bytes: &'a CompactBytes, offset_back: usize, diff --git a/storage/Cargo.toml b/storage/Cargo.toml index e2db3bb6..409ddaed 100644 --- a/storage/Cargo.toml +++ b/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-storage" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" description = "Storage extension crate for GroveDB" @@ -9,9 +9,9 @@ documentation = "https://docs.rs/grovedb-storage" repository = "https://github.com/dashpay/grovedb" [dependencies] -grovedb-costs = { version = "2.2.1", path = "../costs" } -grovedb-path = { version = "2.2.1", path = "../path" } -grovedb-visualize = { version = "2.2.1", path = "../visualize" } +grovedb-costs = { version = "3.0.0", path = "../costs" } +grovedb-path = { version = "3.0.0", path = "../path" } +grovedb-visualize = { version = "3.0.0", path = "../visualize" } blake3 = { version = "1.5.1", optional = true } hex = "0.4.3" diff --git a/visualize/Cargo.toml b/visualize/Cargo.toml index 233341a2..60b09efd 100644 --- a/visualize/Cargo.toml +++ b/visualize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "grovedb-visualize" -version = "2.2.1" +version = "3.0.0" edition = "2021" license = "MIT" description = "Debug prints extension crate for GroveDB" From f92956f76b86cf5789a6f58acd41ca6890c6d8d7 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 21 Jan 2025 15:45:07 +0700 Subject: [PATCH 7/8] fix: fixed issue with query if no path element (#354) --- grovedb/src/element/get.rs | 39 +++++++++++++++ grovedb/src/element/query.rs | 92 +++++++++++++++++++----------------- 2 files changed, 88 insertions(+), 43 deletions(-) diff 
--git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 40868c77..2550e397 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -347,6 +347,45 @@ impl Element { Ok(absolute_element).wrap_with_cost(cost) } + #[cfg(feature = "minimal")] + /// Get an element from Merk under a key; path should be resolved and proper + /// Merk should be loaded by this moment + pub fn get_optional_with_absolute_refs<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( + merk: &Merk, + path: &[&[u8]], + key: K, + allow_cache: bool, + grove_version: &GroveVersion, + ) -> CostResult, Error> { + use crate::error::GroveDbErrorExt; + + check_grovedb_v0_with_cost!( + "get_with_absolute_refs", + grove_version + .grovedb_versions + .element + .get_with_absolute_refs + ); + let mut cost = OperationCost::default(); + + let maybe_element = cost_return_on_error!( + &mut cost, + Self::get_optional(merk, key.as_ref(), allow_cache, grove_version) + .add_context(format!("path is {}", path_as_slices_hex_to_ascii(path))) + ); + + match maybe_element { + None => Ok(None).wrap_with_cost(cost), + Some(element) => { + let absolute_element = cost_return_on_error_no_add!( + &cost, + element.convert_if_reference_to_absolute_reference(path, Some(key.as_ref())) + ); + Ok(Some(absolute_element)).wrap_with_cost(cost) + } + } + } + #[cfg(feature = "minimal")] /// Get an element's value hash from Merk under a key pub fn get_value_hash<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index 68e57056..cd4f5030 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -546,18 +546,20 @@ impl Element { subtree, grove_version, { - results.push(QueryResultElement::ElementResultItem( - cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - )); + if let Some(element) = 
cost_return_on_error!( + &mut cost, + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + results.push(QueryResultElement::ElementResultItem( + element, + )); + } } ); } @@ -571,21 +573,23 @@ impl Element { subtree, grove_version, { - results.push(QueryResultElement::KeyElementPairResultItem( - ( - subquery_path_last_key.to_vec(), - cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - ), - )); + if let Some(element) = cost_return_on_error!( + &mut cost, + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + results.push( + QueryResultElement::KeyElementPairResultItem(( + subquery_path_last_key.to_vec(), + element, + )), + ); + } } ); } @@ -599,22 +603,24 @@ impl Element { subtree, grove_version, { - results.push( - QueryResultElement::PathKeyElementTrioResultItem(( - path_vec.iter().map(|p| p.to_vec()).collect(), - subquery_path_last_key.to_vec(), - cost_return_on_error!( - &mut cost, - Element::get_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ), - )), - ); + if let Some(element) = cost_return_on_error!( + &mut cost, + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + results.push( + QueryResultElement::PathKeyElementTrioResultItem(( + path_vec.iter().map(|p| p.to_vec()).collect(), + subquery_path_last_key.to_vec(), + element, + )), + ); + } } ); } From 44c2244bbccd3e6e684729e8cf620644f7ebbf70 Mon Sep 17 00:00:00 2001 From: fominok Date: Wed, 22 Jan 2025 00:48:47 +0100 Subject: [PATCH 8/8] fix/refactor: atomicity and caching (#347) * wip 
* fix * revert to pub * fix * address clippy warnings --- costs/src/context.rs | 16 +- grovedb-version/src/lib.rs | 38 +- .../src/version/grovedb_versions.rs | 3 + grovedb-version/src/version/v1.rs | 3 + grovedb-version/src/version/v2.rs | 3 + .../estimated_costs/average_case_costs.rs | 12 +- .../batch/estimated_costs/worst_case_costs.rs | 6 +- .../batch/just_in_time_reference_update.rs | 16 +- grovedb/src/batch/mod.rs | 506 ++++--------- grovedb/src/debugger.rs | 20 +- grovedb/src/element/delete.rs | 4 - grovedb/src/element/exists.rs | 6 +- grovedb/src/element/get.rs | 65 +- grovedb/src/element/helpers.rs | 8 +- grovedb/src/element/insert.rs | 8 +- grovedb/src/element/query.rs | 322 ++++---- grovedb/src/element/serialize.rs | 2 +- .../src/estimated_costs/average_case_costs.rs | 26 +- .../src/estimated_costs/worst_case_costs.rs | 19 +- grovedb/src/lib.rs | 500 ++---------- grovedb/src/merk_cache.rs | 271 +++++++ grovedb/src/operations/auxiliary.rs | 145 ++-- grovedb/src/operations/delete/average_case.rs | 14 +- .../src/operations/delete/delete_up_tree.rs | 17 +- grovedb/src/operations/delete/mod.rs | 484 ++++-------- grovedb/src/operations/delete/worst_case.rs | 10 +- grovedb/src/operations/get/average_case.rs | 2 +- grovedb/src/operations/get/mod.rs | 140 +--- grovedb/src/operations/get/query.rs | 18 +- grovedb/src/operations/get/worst_case.rs | 2 +- grovedb/src/operations/insert/mod.rs | 220 +----- grovedb/src/operations/is_empty_tree.rs | 31 +- grovedb/src/operations/proof/generate.rs | 12 +- grovedb/src/operations/proof/verify.rs | 7 +- grovedb/src/query/mod.rs | 8 +- grovedb/src/reference_path.rs | 712 +++++++++++++++++- grovedb/src/replication.rs | 124 +-- grovedb/src/tests/count_sum_tree_tests.rs | 35 +- grovedb/src/tests/count_tree_tests.rs | 50 +- grovedb/src/tests/mod.rs | 46 +- grovedb/src/tests/sum_tree_tests.rs | 103 ++- grovedb/src/tests/tree_hashes_tests.rs | 21 +- grovedb/src/util.rs | 494 +----------- grovedb/src/util/compat.rs | 131 ++++ 
grovedb/src/visualize.rs | 64 +- .../src/estimated_costs/average_case_costs.rs | 4 +- merk/src/merk/meta.rs | 111 +++ merk/src/merk/mod.rs | 71 +- merk/src/merk/open.rs | 25 +- merk/src/merk/restore.rs | 14 +- merk/src/merk/source.rs | 4 +- merk/src/merk/tree_type.rs | 78 ++ merk/src/proofs/chunk/chunk.rs | 6 +- merk/src/proofs/encoding.rs | 2 +- merk/src/proofs/query/map.rs | 4 +- merk/src/proofs/query/mod.rs | 2 +- merk/src/proofs/tree.rs | 22 +- merk/src/test_utils/mod.rs | 10 +- merk/src/test_utils/temp_merk.rs | 64 +- merk/src/tree/encoding.rs | 2 +- merk/src/tree/iter.rs | 2 +- merk/src/tree/mod.rs | 12 +- merk/src/tree/ops.rs | 6 +- merk/src/tree/walk/mod.rs | 8 +- merk/src/visualize.rs | 6 +- path/src/subtree_path.rs | 147 +++- path/src/subtree_path_builder.rs | 111 ++- path/src/subtree_path_iter.rs | 6 +- path/src/util/compact_bytes.rs | 46 +- path/src/util/cow_like.rs | 2 +- storage/src/rocksdb_storage.rs | 2 +- storage/src/rocksdb_storage/storage.rs | 57 +- .../src/rocksdb_storage/storage_context.rs | 2 - .../rocksdb_storage/storage_context/batch.rs | 2 +- .../storage_context/context_no_tx.rs | 286 ------- .../storage_context/raw_iterator.rs | 16 +- storage/src/rocksdb_storage/tests.rs | 62 +- storage/src/storage.rs | 30 +- 78 files changed, 2973 insertions(+), 2993 deletions(-) create mode 100644 grovedb/src/merk_cache.rs create mode 100644 grovedb/src/util/compat.rs create mode 100644 merk/src/merk/meta.rs create mode 100644 merk/src/merk/tree_type.rs delete mode 100644 storage/src/rocksdb_storage/storage_context/context_no_tx.rs diff --git a/costs/src/context.rs b/costs/src/context.rs index d69cb054..d661e3a1 100644 --- a/costs/src/context.rs +++ b/costs/src/context.rs @@ -116,6 +116,15 @@ impl CostResult { pub fn cost_as_result(self) -> Result { self.value.map(|_| self.cost) } + + /// Call the provided function on success without altering result or cost. 
+ pub fn for_ok(self, f: impl FnOnce(&T)) -> CostResult { + if let Ok(x) = &self.value { + f(x) + } + + self + } } impl CostResult, E> { @@ -170,8 +179,9 @@ impl CostsExt for T {} /// 1. Early termination on error; /// 2. Because of 1, `Result` is removed from the equation; /// 3. `CostContext` is removed too because it is added to external cost -/// accumulator; 4. Early termination uses external cost accumulator so previous -/// costs won't be lost. +/// accumulator; +/// 4. Early termination uses external cost accumulator so previous costs won't +/// be lost. #[macro_export] macro_rules! cost_return_on_error { ( &mut $cost:ident, $($body:tt)+ ) => { @@ -193,7 +203,7 @@ macro_rules! cost_return_on_error { /// so no costs will be added except previously accumulated. #[macro_export] macro_rules! cost_return_on_error_no_add { - ( &$cost:ident, $($body:tt)+ ) => { + ( $cost:ident, $($body:tt)+ ) => { { use $crate::CostsExt; let result = { $($body)+ }; diff --git a/grovedb-version/src/lib.rs b/grovedb-version/src/lib.rs index f66019d4..59fb034c 100644 --- a/grovedb-version/src/lib.rs +++ b/grovedb-version/src/lib.rs @@ -1,4 +1,4 @@ -use crate::version::GroveVersion; +use version::GroveVersion; pub mod error; pub mod version; @@ -8,13 +8,15 @@ macro_rules! check_grovedb_v0_with_cost { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { - method: $method.to_string(), - known_versions: vec![EXPECTED_VERSION], - received: $version, - } - .into()) - .wrap_with_cost(OperationCost::default()); + return grovedb_costs::CostsExt::wrap_with_cost( + Err($crate::error::GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()), + Default::default(), + ); } }}; } @@ -24,7 +26,7 @@ macro_rules! 
check_grovedb_v0 { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { + return Err($crate::error::GroveVersionError::UnknownVersionMismatch { method: $method.to_string(), known_versions: vec![EXPECTED_VERSION], received: $version, @@ -56,13 +58,15 @@ macro_rules! check_merk_v0_with_cost { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { - method: $method.to_string(), - known_versions: vec![EXPECTED_VERSION], - received: $version, - } - .into()) - .wrap_with_cost(OperationCost::default()); + return grovedb_costs::CostsExt::wrap_with_cost( + Err($crate::error::GroveVersionError::UnknownVersionMismatch { + method: $method.to_string(), + known_versions: vec![EXPECTED_VERSION], + received: $version, + } + .into()), + Default::default(), + ); } }}; } @@ -72,7 +76,7 @@ macro_rules! 
check_merk_v0 { ($method:expr, $version:expr) => {{ const EXPECTED_VERSION: u16 = 0; if $version != EXPECTED_VERSION { - return Err(GroveVersionError::UnknownVersionMismatch { + return Err($crate::error::GroveVersionError::UnknownVersionMismatch { method: $method.to_string(), known_versions: vec![EXPECTED_VERSION], received: $version, diff --git a/grovedb-version/src/version/grovedb_versions.rs b/grovedb-version/src/version/grovedb_versions.rs index 598fa178..de6e3d42 100644 --- a/grovedb-version/src/version/grovedb_versions.rs +++ b/grovedb-version/src/version/grovedb_versions.rs @@ -48,6 +48,7 @@ pub struct GroveDBOperationsGetVersions { pub get: FeatureVersion, pub get_caching_optional: FeatureVersion, pub follow_reference: FeatureVersion, + pub follow_reference_once: FeatureVersion, pub get_raw: FeatureVersion, pub get_raw_caching_optional: FeatureVersion, pub get_raw_optional: FeatureVersion, @@ -190,6 +191,7 @@ pub struct GroveDBElementMethodVersions { pub get_optional_from_storage: FeatureVersion, pub get_with_absolute_refs: FeatureVersion, pub get_value_hash: FeatureVersion, + pub get_with_value_hash: FeatureVersion, pub get_specialized_cost: FeatureVersion, pub value_defined_cost: FeatureVersion, pub value_defined_cost_for_serialized_value: FeatureVersion, @@ -203,6 +205,7 @@ pub struct GroveDBElementMethodVersions { pub insert_if_changed_value_into_batch_operations: FeatureVersion, pub insert_reference: FeatureVersion, pub insert_reference_into_batch_operations: FeatureVersion, + pub insert_reference_if_changed_value: FeatureVersion, pub insert_subtree: FeatureVersion, pub insert_subtree_into_batch_operations: FeatureVersion, pub get_query: FeatureVersion, diff --git a/grovedb-version/src/version/v1.rs b/grovedb-version/src/version/v1.rs index 5bf58180..0234315a 100644 --- a/grovedb-version/src/version/v1.rs +++ b/grovedb-version/src/version/v1.rs @@ -65,6 +65,8 @@ pub const GROVE_V1: GroveVersion = GroveVersion { serialize: 0, serialized_size: 0, 
deserialize: 0, + get_with_value_hash: 0, + insert_reference_if_changed_value: 0, }, operations: GroveDBOperationsVersions { get: GroveDBOperationsGetVersions { @@ -86,6 +88,7 @@ pub const GROVE_V1: GroveVersion = GroveVersion { worst_case_for_get_raw: 0, worst_case_for_get: 0, is_empty_tree: 0, + follow_reference_once: 0, }, insert: GroveDBOperationsInsertVersions { insert: 0, diff --git a/grovedb-version/src/version/v2.rs b/grovedb-version/src/version/v2.rs index 3591ba1a..6f357c6b 100644 --- a/grovedb-version/src/version/v2.rs +++ b/grovedb-version/src/version/v2.rs @@ -65,6 +65,8 @@ pub const GROVE_V2: GroveVersion = GroveVersion { serialize: 0, serialized_size: 0, deserialize: 0, + get_with_value_hash: 0, + insert_reference_if_changed_value: 0, }, operations: GroveDBOperationsVersions { get: GroveDBOperationsGetVersions { @@ -86,6 +88,7 @@ pub const GROVE_V2: GroveVersion = GroveVersion { worst_case_for_get_raw: 0, worst_case_for_get: 0, is_empty_tree: 0, + follow_reference_once: 0, }, insert: GroveDBOperationsInsertVersions { insert: 0, diff --git a/grovedb/src/batch/estimated_costs/average_case_costs.rs b/grovedb/src/batch/estimated_costs/average_case_costs.rs index 310c5863..13fdbd29 100644 --- a/grovedb/src/batch/estimated_costs/average_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/average_case_costs.rs @@ -192,7 +192,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { let mut cost = OperationCost::default(); let layer_element_estimates = cost_return_on_error_no_add!( - &cost, + cost, self.paths.get(path).ok_or_else(|| { let paths = self .paths @@ -212,9 +212,9 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { .estimated_to_be_empty(); // Then we have to get the tree - if self.cached_merks.get(path).is_none() { + if !self.cached_merks.contains_key(path) { let layer_info = cost_return_on_error_no_add!( - &cost, + cost, self.paths.get(path).ok_or_else(|| { let paths = self .paths @@ -229,7 +229,7 @@ impl TreeCache for 
AverageCaseTreeCacheKnownPaths { }) ); cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, path, @@ -256,6 +256,8 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { Ok(([0u8; 32], None, AggregateData::NoAggregateData)).wrap_with_cost(cost) } + // Clippy's suggestion doesn't respect ownership in this case + #[allow(clippy::map_entry)] fn update_base_merk_root_key( &mut self, _root_key: Option>, @@ -268,7 +270,7 @@ impl TreeCache for AverageCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains_key(&base_path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, &base_path, diff --git a/grovedb/src/batch/estimated_costs/worst_case_costs.rs b/grovedb/src/batch/estimated_costs/worst_case_costs.rs index b48109ad..82dd7bbe 100644 --- a/grovedb/src/batch/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/batch/estimated_costs/worst_case_costs.rs @@ -189,7 +189,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { let mut cost = OperationCost::default(); let worst_case_layer_element_estimates = cost_return_on_error_no_add!( - &cost, + cost, self.paths .get(path) .ok_or_else(|| Error::PathNotFoundInCacheForEstimatedCosts(format!( @@ -201,7 +201,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains(path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, @@ -244,7 +244,7 @@ impl TreeCache for WorstCaseTreeCacheKnownPaths { // Then we have to get the tree if !self.cached_merks.contains(&base_path) { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, &base_path, diff --git a/grovedb/src/batch/just_in_time_reference_update.rs b/grovedb/src/batch/just_in_time_reference_update.rs index 06081eb2..53f6a835 100644 --- 
a/grovedb/src/batch/just_in_time_reference_update.rs +++ b/grovedb/src/batch/just_in_time_reference_update.rs @@ -54,13 +54,13 @@ where updated_new_element_with_old_flags.set_flags(maybe_old_flags.clone()); // There are no storage flags, we can just hash new element let new_serialized_bytes = cost_return_on_error_no_add!( - &cost, + cost, updated_new_element_with_old_flags.serialize(grove_version) ); let val_hash = value_hash(&new_serialized_bytes).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } else { - let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); + let val_hash = value_hash(serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } } else { @@ -94,7 +94,7 @@ where updated_new_element_with_old_flags.set_flags(maybe_old_flags.clone()); let serialized_with_old_flags = cost_return_on_error_no_add!( - &cost, + cost, updated_new_element_with_old_flags.serialize(grove_version) ); KV::node_value_byte_cost_size( @@ -120,7 +120,7 @@ where if let Some(old_element_flags) = maybe_old_flags.as_mut() { if let BasicStorageRemoval(removed_bytes) = storage_costs.removed_bytes { let (_, value_removed_bytes) = cost_return_on_error_no_add!( - &cost, + cost, split_removal_bytes(old_element_flags, 0, removed_bytes) ); storage_costs.removed_bytes = value_removed_bytes; @@ -130,7 +130,7 @@ where let mut new_element_cloned = original_new_element.clone(); let changed = cost_return_on_error_no_add!( - &cost, + cost, (flags_update)( &storage_costs, maybe_old_flags.clone(), @@ -150,10 +150,8 @@ where return Ok(val_hash).wrap_with_cost(cost); } else { // There are no storage flags, we can just hash new element - let new_serialized_bytes = cost_return_on_error_no_add!( - &cost, - new_element_cloned.serialize(grove_version) - ); + let new_serialized_bytes = + cost_return_on_error_no_add!(cost, new_element_cloned.serialize(grove_version)); new_storage_cost = KV::node_value_byte_cost_size( key.len() as u32, diff --git 
a/grovedb/src/batch/mod.rs b/grovedb/src/batch/mod.rs index 3d35525c..d101a3ee 100644 --- a/grovedb/src/batch/mod.rs +++ b/grovedb/src/batch/mod.rs @@ -57,12 +57,9 @@ use grovedb_merk::{ }; use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - Storage, StorageBatch, StorageContext, -}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, + rocksdb_storage::PrefixedRocksDbTransactionContext, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use grovedb_visualize::{Drawer, Visualize}; use integer_encoding::VarInt; use itertools::Itertools; @@ -82,6 +79,7 @@ use crate::{ reference_path::{ path_from_reference_path_type, path_from_reference_qualified_path_type, ReferencePathType, }, + util::TxRef, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; @@ -708,11 +706,11 @@ where /// change in the same batch. It distinguishes between two cases: /// /// 1. When the hop count is exactly 1, it tries to directly extract the - /// value hash from the reference element. + /// value hash from the reference element. /// /// 2. When the hop count is greater than 1, it retrieves the referenced - /// element and then determines the next step based on the type of the - /// element. + /// element and then determines the next step based on the type of the + /// element. /// /// # Arguments /// @@ -724,12 +722,11 @@ where /// # Returns /// /// * `Ok(CryptoHash)`: Returns the crypto hash of the referenced element - /// wrapped in the - /// associated cost, if successful. + /// wrapped in the associated cost, if successful. /// /// * `Err(Error)`: Returns an error if there is an issue with the - /// operation, such as - /// missing reference, corrupted data, or invalid batch operation. 
+ /// operation, such as missing reference, corrupted data, or invalid batch + /// operation. /// /// # Errors /// @@ -808,7 +805,7 @@ where Ok(referenced_element_value_hash).wrap_with_cost(cost) } else if let Some(referenced_path) = intermediate_reference_info { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(referenced_path.clone(), qualified_path) ); self.follow_reference_get_value_hash( @@ -870,8 +867,8 @@ where /// the Merk tree. /// * `Error::CorruptedData` - If the referenced element cannot be /// deserialized due to corrupted data. - fn get_and_deserialize_referenced_element<'a>( - &'a mut self, + fn get_and_deserialize_referenced_element( + &mut self, key: &[u8], reference_path: &[Vec], grove_version: &GroveVersion, @@ -901,7 +898,7 @@ where if let Some(referenced_element) = referenced_element { let element = cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(referenced_element.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -1001,13 +998,13 @@ where match element { Element::Item(..) | Element::SumItem(..) => { let serialized = - cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); + cost_return_on_error_no_add!(cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } Element::Reference(path, ..) => { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(path, qualified_path) ); self.follow_reference_get_value_hash( @@ -1079,7 +1076,7 @@ where match element { Element::Item(..) | Element::SumItem(..) => { let serialized = cost_return_on_error_no_add!( - &cost, + cost, element.serialize(grove_version) ); if element.get_flags().is_none() { @@ -1126,7 +1123,7 @@ where } Element::Reference(path, ..) 
=> { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type( path.clone(), qualified_path @@ -1154,13 +1151,13 @@ where GroveOp::InsertOnly { element } => match element { Element::Item(..) | Element::SumItem(..) => { let serialized = - cost_return_on_error_no_add!(&cost, element.serialize(grove_version)); + cost_return_on_error_no_add!(cost, element.serialize(grove_version)); let val_hash = value_hash(&serialized).unwrap_add_cost(&mut cost); Ok(val_hash).wrap_with_cost(cost) } Element::Reference(path, ..) => { let path = cost_return_on_error_no_add!( - &cost, + cost, path_from_reference_qualified_path_type(path.clone(), qualified_path) ); self.follow_reference_get_value_hash( @@ -1440,7 +1437,7 @@ where ) ); cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(value.as_slice(), grove_version).map_err(|_| { Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -1529,7 +1526,7 @@ where cost_return_on_error!( &mut cost, GroveDb::update_tree_item_preserve_flag_into_batch_operations( - &merk, + merk, key_info.get_key(), root_key, hash, @@ -1576,7 +1573,7 @@ where } }; let merk_feature_type = - cost_return_on_error_no_add!(&cost, element.get_feature_type(in_tree_type)); + cost_return_on_error_no_add!(cost, element.get_feature_type(in_tree_type)); cost_return_on_error!( &mut cost, @@ -1713,12 +1710,9 @@ where ) .map_err(|e| Error::CorruptedData(e.to_string())) ); - let r = merk - .root_hash_key_and_aggregate_data() + merk.root_hash_key_and_aggregate_data() .add_cost(cost) - .map_err(Error::MerkError); - - r + .map_err(Error::MerkError) } fn get_batch_run_mode(&self) -> BatchRunMode { @@ -1785,7 +1779,7 @@ impl GroveDb { if batch_apply_options.base_root_storage_is_free { // the base root is free let mut update_root_cost = cost_return_on_error_no_add!( - &cost, + cost, merk_tree_cache .update_base_merk_root_key(calculated_root_key, grove_version) .cost_as_result() @@ -1826,14 +1820,11 @@ impl 
GroveDb { { match ops_on_path.entry(key.clone()) { Entry::Vacant(vacant_entry) => { - vacant_entry.insert( - GroveOp::ReplaceTreeRootKey { - hash: root_hash, - root_key: calculated_root_key, - aggregate_data, - } - .into(), - ); + vacant_entry.insert(GroveOp::ReplaceTreeRootKey { + hash: root_hash, + root_key: calculated_root_key, + aggregate_data, + }); } Entry::Occupied(occupied_entry) => { let mutable_occupied_entry = occupied_entry.into_mut(); @@ -1866,7 +1857,6 @@ impl GroveDb { aggregate_data: AggregateData::NoAggregateData, } - .into(); } else if let Element::SumTree(.., flags) = element { @@ -1877,7 +1867,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::BigSumTree(.., flags) = element { @@ -1888,7 +1877,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::CountTree(.., flags) = element { @@ -1899,7 +1887,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else if let Element::CountSumTree(.., flags) = element { @@ -1910,7 +1897,6 @@ impl GroveDb { flags: flags.clone(), aggregate_data, } - .into(); } else { return Err(Error::InvalidBatchOperation( "insertion of element under a non tree", @@ -1958,8 +1944,7 @@ impl GroveDb { hash: root_hash, root_key: calculated_root_key, aggregate_data, - } - .into(), + }, ); let mut ops_on_level: BTreeMap< KeyInfoPath, @@ -2282,74 +2267,6 @@ impl GroveDb { } } - /// Opens merk at path with given storage batch context. Returns CostResult. 
- pub fn open_batch_merk_at_path<'a, B: AsRef<[u8]>>( - &'a self, - storage_batch: &'a StorageBatch, - path: SubtreePath, - new_merk: bool, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - check_grovedb_v0_with_cost!( - "open_batch_merk_at_path", - grove_version - .grovedb_versions - .apply_batch - .open_batch_merk_at_path - ); - let mut local_cost = OperationCost::default(); - let storage = self - .db - .get_storage_context(path.clone(), Some(storage_batch)) - .unwrap_add_cost(&mut local_cost); - - if new_merk { - let merk_type = if path.is_root() { - MerkType::BaseMerk - } else { - MerkType::LayeredMerk - }; - Ok(Merk::open_empty(storage, merk_type, TreeType::NormalTree)) - .wrap_with_cost(local_cost) - } else if let Some((base_path, last)) = path.derive_parent() { - let parent_storage = self - .db - .get_storage_context(base_path, Some(storage_batch)) - .unwrap_add_cost(&mut local_cost); - let element = cost_return_on_error!( - &mut local_cost, - Element::get_from_storage(&parent_storage, last, grove_version) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(local_cost) - } else { - Err(Error::CorruptedData( - "cannot open a subtree as parent exists but is not a tree".to_owned(), - )) - .wrap_with_cost(local_cost) - } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a subtree".to_owned())) - .add_cost(local_cost) - } - } - /// Applies batch of operations on GroveDB pub fn apply_batch_with_element_flags_update( &self, @@ -2379,6 +2296,7 @@ impl GroveDb { .apply_batch_with_element_flags_update ); let mut cost = 
OperationCost::default(); + let tx = TxRef::new(&self.db, transaction); if ops.is_empty() { return Ok(()).wrap_with_cost(cost); @@ -2416,93 +2334,50 @@ impl GroveDb { // 5. Remove operation from the tree, repeat until there are operations to do; // 6. Add root leaves save operation to the batch // 7. Apply storage_cost batch - if let Some(tx) = transaction { - cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - batch_apply_options, - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_multi_context_batch(storage_batch, Some(tx)) - .map_err(|e| e.into()) - ); + cost_return_on_error!( + &mut cost, + self.apply_body( + ops, + batch_apply_options, + update_element_flags_function, + split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); - // Keep this commented for easy debugging in the future. 
- // let issues = self - // .visualize_verify_grovedb(Some(tx), true, - // &Default::default()) .unwrap(); - // if issues.len() > 0 { - // println!( - // "tx_issues: {}", - // issues - // .iter() - // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", - // hash, a, b, c)) .collect::>() - // .join(" | ") - // ); - // } - } else { - cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - batch_apply_options, - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + // TODO: compute batch costs + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(storage_batch, Some(tx.as_ref())) + .map_err(|e| e.into()) + ); - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_multi_context_batch(storage_batch, None) - .map_err(|e| e.into()) - ); + // Keep this commented for easy debugging in the future. + // let issues = self + // .visualize_verify_grovedb(Some(tx), true, + // &Default::default()) .unwrap(); + // if issues.len() > 0 { + // println!( + // "tx_issues: {}", + // issues + // .iter() + // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", + // hash, a, b, c)) .collect::>() + // .join(" | ") + // ); + // } - // Keep this commented for easy debugging in the future. 
- // let issues = self - // .visualize_verify_grovedb(None, true, &Default::default()) - // .unwrap(); - // if issues.len() > 0 { - // println!( - // "non_tx_issues: {}", - // issues - // .iter() - // .map(|(hash, (a, b, c))| format!("{}: {} {} {}", - // hash, a, b, c)) .collect::>() - // .join(" | ") - // ); - // } - } - Ok(()).wrap_with_cost(cost) + tx.commit_local().wrap_with_cost(cost) } /// Applies a partial batch of operations on GroveDB @@ -2541,6 +2416,7 @@ impl GroveDb { .apply_partial_batch_with_element_flags_update ); let mut cost = OperationCost::default(); + let tx = TxRef::new(&self.db, transaction); if ops.is_empty() { return Ok(()).wrap_with_cost(cost); @@ -2582,177 +2458,93 @@ impl GroveDb { // 5. Remove operation from the tree, repeat until there are operations to do; // 6. Add root leaves save operation to the batch // 7. Apply storage_cost batch - if let Some(tx) = transaction { - let left_over_operations = cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - Some(batch_apply_options.clone()), - &mut update_element_flags_function, - &mut split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - // if we paused at the root height, the left over operations would be to replace - // a lot of leaf nodes in the root tree - - // let's build the write batch - let (mut write_batch, mut pending_costs) = cost_return_on_error!( - &mut cost, - self.db - .build_write_batch(storage_batch) - .map_err(|e| e.into()) - ); - - let total_current_costs = cost.clone().add(pending_costs.clone()); - - // todo: estimate root costs - - // at this point we need to send the pending costs back - // we will get GroveDB a new set of GroveDBOps - - let new_operations = cost_return_on_error_no_add!( - &cost, - add_on_operations(&total_current_costs, &left_over_operations) - ); - - // we are trying to finalize - 
batch_apply_options.batch_pause_height = None; - - let continue_storage_batch = StorageBatch::new(); - - cost_return_on_error!( - &mut cost, - self.continue_partial_apply_body( - left_over_operations, - new_operations, - Some(batch_apply_options), - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_transactional_merk_at_path( - &continue_storage_batch, - path.into(), - tx, - new_merk, - grove_version, - ) - }, - grove_version - ) - ); - - // let's build the write batch - let continued_pending_costs = cost_return_on_error!( - &mut cost, - self.db - .continue_write_batch(&mut write_batch, continue_storage_batch) - .map_err(|e| e.into()) - ); - - pending_costs.add_assign(continued_pending_costs); + let left_over_operations = cost_return_on_error!( + &mut cost, + self.apply_body( + ops, + Some(batch_apply_options.clone()), + &mut update_element_flags_function, + &mut split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); + // if we paused at the root height, the left over operations would be to replace + // a lot of leaf nodes in the root tree - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_db_write_batch(write_batch, pending_costs, Some(tx)) - .map_err(|e| e.into()) - ); - } else { - let left_over_operations = cost_return_on_error!( - &mut cost, - self.apply_body( - ops, - Some(batch_apply_options.clone()), - &mut update_element_flags_function, - &mut split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + // let's build the write batch + let (mut write_batch, mut pending_costs) = cost_return_on_error!( + &mut cost, + self.db + .build_write_batch(storage_batch) + .map_err(|e| e.into()) + ); - // if we 
paused at the root height, the left over operations would be to replace - // a lot of leaf nodes in the root tree + let total_current_costs = cost.clone().add(pending_costs.clone()); - // let's build the write batch - let (mut write_batch, mut pending_costs) = cost_return_on_error!( - &mut cost, - self.db - .build_write_batch(storage_batch) - .map_err(|e| e.into()) - ); + // todo: estimate root costs - let total_current_costs = cost.clone().add(pending_costs.clone()); + // at this point we need to send the pending costs back + // we will get GroveDB a new set of GroveDBOps - // at this point we need to send the pending costs back - // we will get GroveDB a new set of GroveDBOps + let new_operations = cost_return_on_error_no_add!( + cost, + add_on_operations(&total_current_costs, &left_over_operations) + ); - let new_operations = cost_return_on_error_no_add!( - &cost, - add_on_operations(&total_current_costs, &left_over_operations) - ); + // we are trying to finalize + batch_apply_options.batch_pause_height = None; - // we are trying to finalize - batch_apply_options.batch_pause_height = None; + let continue_storage_batch = StorageBatch::new(); - let continue_storage_batch = StorageBatch::new(); + cost_return_on_error!( + &mut cost, + self.continue_partial_apply_body( + left_over_operations, + new_operations, + Some(batch_apply_options), + update_element_flags_function, + split_removal_bytes_function, + |path, new_merk| { + self.open_batch_transactional_merk_at_path( + &continue_storage_batch, + path.into(), + tx.as_ref(), + new_merk, + grove_version, + ) + }, + grove_version + ) + ); - cost_return_on_error!( - &mut cost, - self.continue_partial_apply_body( - left_over_operations, - new_operations, - Some(batch_apply_options), - update_element_flags_function, - split_removal_bytes_function, - |path, new_merk| { - self.open_batch_merk_at_path( - &continue_storage_batch, - path.into(), - new_merk, - grove_version, - ) - }, - grove_version - ) - ); + // let's build the 
write batch + let continued_pending_costs = cost_return_on_error!( + &mut cost, + self.db + .continue_write_batch(&mut write_batch, continue_storage_batch) + .map_err(|e| e.into()) + ); - // let's build the write batch - let continued_pending_costs = cost_return_on_error!( - &mut cost, - self.db - .continue_write_batch(&mut write_batch, continue_storage_batch) - .map_err(|e| e.into()) - ); + pending_costs.add_assign(continued_pending_costs); - pending_costs.add_assign(continued_pending_costs); + // TODO: compute batch costs + cost_return_on_error!( + &mut cost, + self.db + .commit_db_write_batch(write_batch, pending_costs, Some(tx.as_ref())) + .map_err(|e| e.into()) + ); - // TODO: compute batch costs - cost_return_on_error!( - &mut cost, - self.db - .commit_db_write_batch(write_batch, pending_costs, None) - .map_err(|e| e.into()) - ); - } - Ok(()).wrap_with_cost(cost) + tx.commit_local().wrap_with_cost(cost) } #[cfg(feature = "estimated_costs")] diff --git a/grovedb/src/debugger.rs b/grovedb/src/debugger.rs index 696a3a6a..50dbcce0 100644 --- a/grovedb/src/debugger.rs +++ b/grovedb/src/debugger.rs @@ -227,9 +227,15 @@ async fn fetch_node( }): Json>, ) -> Result>, AppError> { let db = state.get_snapshot(session_id).await?; + let transaction = db.start_transaction(); let merk = db - .open_non_transactional_merk_at_path(path.as_slice().into(), None, GroveVersion::latest()) + .open_transactional_merk_at_path( + path.as_slice().into(), + &transaction, + None, + GroveVersion::latest(), + ) .unwrap()?; let node = merk.get_node_dbg(&key)?; @@ -249,9 +255,15 @@ async fn fetch_root_node( }): Json>, ) -> Result>, AppError> { let db = state.get_snapshot(session_id).await?; + let transaction = db.start_transaction(); let merk = db - .open_non_transactional_merk_at_path(SubtreePath::empty(), None, GroveVersion::latest()) + .open_transactional_merk_at_path( + SubtreePath::empty(), + &transaction, + None, + GroveVersion::latest(), + ) .unwrap()?; let node = 
merk.get_root_node_dbg()?; @@ -312,6 +324,7 @@ fn query_result_to_grovedbg( query_result: QueryResultElements, ) -> Result, crate::Error> { let mut result = Vec::new(); + let transaction = db.start_transaction(); let mut last_merk: Option<(Vec>, grovedb_merk::Merk<_>)> = None; @@ -322,8 +335,9 @@ fn query_result_to_grovedbg( _ => { last_merk = Some(( path.clone(), - db.open_non_transactional_merk_at_path( + db.open_transactional_merk_at_path( path.as_slice().into(), + &transaction, None, GroveVersion::latest(), ) diff --git a/grovedb/src/element/delete.rs b/grovedb/src/element/delete.rs index 17095d72..4440d426 100644 --- a/grovedb/src/element/delete.rs +++ b/grovedb/src/element/delete.rs @@ -1,8 +1,6 @@ //! Delete //! Implements functions in Element for deleting -#[cfg(feature = "minimal")] -use grovedb_costs::OperationCost; #[cfg(feature = "minimal")] use grovedb_costs::{storage_cost::removal::StorageRemovedBytes, CostResult, CostsExt}; #[cfg(feature = "minimal")] @@ -14,8 +12,6 @@ use grovedb_storage::StorageContext; #[cfg(feature = "minimal")] use grovedb_version::check_grovedb_v0_with_cost; #[cfg(feature = "minimal")] -use grovedb_version::error::GroveVersionError; -#[cfg(feature = "minimal")] use grovedb_version::version::GroveVersion; #[cfg(feature = "minimal")] diff --git a/grovedb/src/element/exists.rs b/grovedb/src/element/exists.rs index 63dcfe4b..6380a7b4 100644 --- a/grovedb/src/element/exists.rs +++ b/grovedb/src/element/exists.rs @@ -1,12 +1,10 @@ //! Exists //! 
Implements in Element functions for checking if stuff exists -use grovedb_costs::{CostResult, CostsExt, OperationCost}; +use grovedb_costs::CostResult; use grovedb_merk::Merk; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use crate::{Element, Error}; diff --git a/grovedb/src/element/get.rs b/grovedb/src/element/get.rs index 2550e397..c5c893c5 100644 --- a/grovedb/src/element/get.rs +++ b/grovedb/src/element/get.rs @@ -41,7 +41,7 @@ impl Element { let value = result?; value.ok_or_else(|| { let key_single_byte = if key.as_ref().len() == 1 { - format!("({} in decimal) ", key.as_ref().get(0).unwrap()) + format!("({} in decimal) ", key.as_ref().first().unwrap()) } else { String::new() }; @@ -84,7 +84,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let element = cost_return_on_error_no_add!( - &cost, + cost, value_opt .map(|value| { Self::deserialize(value.as_slice(), grove_version).map_err(|_| { @@ -164,7 +164,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let maybe_tree_inner: Option = cost_return_on_error_no_add!( - &cost, + cost, node_value_opt .map(|node_value| { Decode::decode(node_value.as_slice()) @@ -175,7 +175,7 @@ impl Element { let value = maybe_tree_inner.map(|tree_inner| tree_inner.value_as_owned()); let element = cost_return_on_error_no_add!( - &cost, + cost, value .as_ref() .map(|value| { @@ -248,7 +248,7 @@ impl Element { .map_err(|e| Error::CorruptedData(e.to_string())) ); let maybe_tree_inner: Option = cost_return_on_error_no_add!( - &cost, + cost, node_value_opt .map(|node_value| { Decode::decode(node_value.as_slice()) @@ -264,7 +264,7 @@ impl Element { }; let node_type = tree_feature_type.node_type(); let element = cost_return_on_error_no_add!( - &cost, + cost, Self::deserialize(value.as_slice(), grove_version).map_err(|_| { 
Error::CorruptedData(String::from("unable to deserialize element")) }) @@ -340,7 +340,7 @@ impl Element { ); let absolute_element = cost_return_on_error_no_add!( - &cost, + cost, element.convert_if_reference_to_absolute_reference(path, Some(key.as_ref())) ); @@ -378,7 +378,7 @@ impl Element { None => Ok(None).wrap_with_cost(cost), Some(element) => { let absolute_element = cost_return_on_error_no_add!( - &cost, + cost, element.convert_if_reference_to_absolute_reference(path, Some(key.as_ref())) ); Ok(Some(absolute_element)).wrap_with_cost(cost) @@ -413,6 +413,47 @@ impl Element { Ok(value_hash).wrap_with_cost(cost) } + + #[cfg(feature = "minimal")] + /// Get an element and its value hash from Merk under a key + pub fn get_with_value_hash<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( + merk: &Merk, + key: K, + allow_cache: bool, + grove_version: &GroveVersion, + ) -> CostResult<(Element, Hash), Error> { + check_grovedb_v0_with_cost!( + "get_with_value_hash", + grove_version.grovedb_versions.element.get_with_value_hash + ); + let mut cost = OperationCost::default(); + + let Some((value, value_hash)) = cost_return_on_error!( + &mut cost, + merk.get_value_and_value_hash( + key.as_ref(), + allow_cache, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version + ) + .map_err(|e| Error::CorruptedData(e.to_string())) + ) else { + return Err(Error::PathKeyNotFound(format!( + "get: key \"{}\" not found in Merk that has a root key [{}] and is of type {}", + hex::encode(key), + merk.root_key() + .map(hex::encode) + .unwrap_or("None".to_string()), + merk.merk_type + ))) + .wrap_with_cost(cost); + }; + + Self::deserialize(value.as_slice(), grove_version) + .map_err(|_| Error::CorruptedData(String::from("unable to deserialize element"))) + .map(|e| (e, value_hash)) + .wrap_with_cost(cost) + } } #[cfg(feature = "minimal")] @@ -429,8 +470,10 @@ mod tests { let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); 
+ let transaction = storage.start_transaction(); + let ctx = storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(); let mut merk = Merk::open_base( ctx, @@ -450,12 +493,12 @@ mod tests { .expect("expected successful insertion 2"); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); let ctx = storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(); let mut merk = Merk::open_base( ctx, diff --git a/grovedb/src/element/helpers.rs b/grovedb/src/element/helpers.rs index d5cf0321..54e99604 100644 --- a/grovedb/src/element/helpers.rs +++ b/grovedb/src/element/helpers.rs @@ -20,7 +20,7 @@ use grovedb_merk::{ }, }; #[cfg(feature = "minimal")] -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; #[cfg(feature = "minimal")] use integer_encoding::VarInt; @@ -390,7 +390,7 @@ impl Element { #[cfg(feature = "minimal")] /// Get tree costs for a key value pub fn specialized_costs_for_key_value( - key: &Vec, + key: &[u8], value: &[u8], node_type: NodeType, grove_version: &GroveVersion, @@ -497,9 +497,7 @@ impl Element { #[cfg(feature = "minimal")] /// Get the value defined cost for a serialized value pub fn value_defined_cost(&self, grove_version: &GroveVersion) -> Option { - let Some(value_cost) = self.get_specialized_cost(grove_version).ok() else { - return None; - }; + let value_cost = self.get_specialized_cost(grove_version).ok()?; let cost = value_cost + self.get_flags().as_ref().map_or(0, |flags| { diff --git a/grovedb/src/element/insert.rs b/grovedb/src/element/insert.rs index 942a6fd5..85dad9f9 100644 --- a/grovedb/src/element/insert.rs +++ b/grovedb/src/element/insert.rs @@ -7,9 +7,7 @@ use 
grovedb_costs::{ }; use grovedb_merk::{BatchEntry, Error as MerkError, Merk, MerkOptions, Op, TreeFeatureType}; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use integer_encoding::VarInt; use crate::{Element, Element::SumItem, Error, Hash}; @@ -397,10 +395,10 @@ impl Element { let cost = OperationCost::default(); let merk_feature_type = - cost_return_on_error_no_add!(&cost, self.get_feature_type(merk.tree_type)); + cost_return_on_error_no_add!(cost, self.get_feature_type(merk.tree_type)); let tree_cost = - cost_return_on_error_no_add!(&cost, self.get_specialized_cost(grove_version)); + cost_return_on_error_no_add!(cost, self.get_specialized_cost(grove_version)); let cost = tree_cost + self.get_flags().as_ref().map_or(0, |flags| { diff --git a/grovedb/src/element/query.rs b/grovedb/src/element/query.rs index cd4f5030..1626ad5c 100644 --- a/grovedb/src/element/query.rs +++ b/grovedb/src/element/query.rs @@ -15,15 +15,11 @@ use grovedb_merk::proofs::query::SubqueryBranch; #[cfg(feature = "minimal")] use grovedb_merk::proofs::Query; #[cfg(feature = "minimal")] -use grovedb_merk::tree_type::TreeType; -#[cfg(feature = "minimal")] use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::{rocksdb_storage::RocksDbStorage, RawIterator, StorageContext}; #[cfg(feature = "minimal")] -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; @@ -41,7 +37,6 @@ use crate::{ QueryPathKeyElementTrioResultType, }, }, - util::{merk_optional_tx, merk_optional_tx_internal_error, storage_context_optional_tx}, Error, PathQuery, TransactionArg, }; #[cfg(feature = 
"minimal")] @@ -174,7 +169,7 @@ fn format_subquery_branch(branch: &SubqueryBranch, indent: usize) -> String { } #[cfg(feature = "minimal")] -impl<'db, 'ctx, 'a> fmt::Display for PathQueryPushArgs<'db, 'ctx, 'a> +impl<'db, 'ctx> fmt::Display for PathQueryPushArgs<'db, 'ctx, '_> where 'db: 'ctx, { @@ -453,6 +448,8 @@ impl Element { args: PathQueryPushArgs, grove_version: &GroveVersion, ) -> CostResult<(), Error> { + use crate::util::{compat, TxRef}; + check_grovedb_v0_with_cost!( "path_query_push", grove_version.grovedb_versions.element.path_query_push @@ -477,6 +474,9 @@ impl Element { limit, offset, } = args; + + let tx = TxRef::new(storage, transaction); + let QueryOptions { allow_get_raw, allow_cache, @@ -486,7 +486,7 @@ impl Element { if element.is_any_tree() { let mut path_vec = path.to_vec(); let key = cost_return_on_error_no_add!( - &cost, + cost, key.ok_or(Error::MissingParameter( "the key must be provided when using a subquery path", )) @@ -534,95 +534,68 @@ impl Element { path_vec.extend(subquery_path_front_keys.iter().map(|k| k.as_slice())); let subtree_path: SubtreePath<_> = path_vec.as_slice().into(); + let subtree = cost_return_on_error!( + &mut cost, + compat::merk_optional_tx( + storage, + subtree_path, + tx.as_ref(), + None, + grove_version + ) + ); match result_type { QueryElementResultType => { - merk_optional_tx!( + if let Some(element) = cost_return_on_error!( &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - if let Some(element) = cost_return_on_error!( - &mut cost, - Element::get_optional_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ) { - results.push(QueryResultElement::ElementResultItem( - element, - )); - } - } - ); + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + 
results.push(QueryResultElement::ElementResultItem(element)); + } } QueryKeyElementPairResultType => { - merk_optional_tx!( + if let Some(element) = cost_return_on_error!( &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - if let Some(element) = cost_return_on_error!( - &mut cost, - Element::get_optional_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ) { - results.push( - QueryResultElement::KeyElementPairResultItem(( - subquery_path_last_key.to_vec(), - element, - )), - ); - } - } - ); + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + results.push(QueryResultElement::KeyElementPairResultItem(( + subquery_path_last_key.to_vec(), + element, + ))); + } } QueryPathKeyElementTrioResultType => { - merk_optional_tx!( + if let Some(element) = cost_return_on_error!( &mut cost, - storage, - subtree_path, - None, - transaction, - subtree, - grove_version, - { - if let Some(element) = cost_return_on_error!( - &mut cost, - Element::get_optional_with_absolute_refs( - &subtree, - path_vec.as_slice(), - subquery_path_last_key.as_slice(), - allow_cache, - grove_version, - ) - ) { - results.push( - QueryResultElement::PathKeyElementTrioResultItem(( - path_vec.iter().map(|p| p.to_vec()).collect(), - subquery_path_last_key.to_vec(), - element, - )), - ); - } - } - ); + Element::get_optional_with_absolute_refs( + &subtree, + path_vec.as_slice(), + subquery_path_last_key.as_slice(), + allow_cache, + grove_version, + ) + ) { + results.push(QueryResultElement::PathKeyElementTrioResultItem( + ( + path_vec.iter().map(|p| p.to_vec()).collect(), + subquery_path_last_key.to_vec(), + element, + ), + )); + } } } } else { @@ -640,7 +613,7 @@ impl Element { } } else if allow_get_raw { cost_return_on_error_no_add!( - &cost, + cost, Element::basic_push( 
PathQueryPushArgs { storage, @@ -670,7 +643,7 @@ impl Element { } } else { cost_return_on_error_no_add!( - &cost, + cost, Element::basic_push( PathQueryPushArgs { storage, @@ -751,7 +724,12 @@ impl Element { add_element_function: fn(PathQueryPushArgs, &GroveVersion) -> CostResult<(), Error>, grove_version: &GroveVersion, ) -> CostResult<(), Error> { - use crate::error::GroveDbErrorExt; + use grovedb_storage::Storage; + + use crate::{ + error::GroveDbErrorExt, + util::{compat, TxRef}, + }; check_grovedb_v0_with_cost!( "query_item", @@ -759,26 +737,36 @@ impl Element { ); let mut cost = OperationCost::default(); + let tx = TxRef::new(storage, transaction); let subtree_path: SubtreePath<_> = path.into(); if !item.is_range() { // this is a query on a key if let QueryItem::Key(key) = item { - let element_res = merk_optional_tx_internal_error!( - &mut cost, + let subtree_res = compat::merk_optional_tx( storage, subtree_path, + tx.as_ref(), None, - transaction, - subtree, grove_version, - { + ); + + if subtree_res.value().is_err() + && !matches!(subtree_res.value(), Err(Error::PathParentLayerNotFound(..))) + { + // simulating old macro's behavior by letting this particular kind of error to + // pass and to short circuit with the rest + return subtree_res.map_ok(|_| ()); + } + + let element_res = subtree_res + .flat_map_ok(|subtree| { Element::get(&subtree, key, query_options.allow_cache, grove_version) .add_context(format!("path is {}", path_as_slices_hex_to_ascii(path))) - .unwrap_add_cost(&mut cost) - } - ); + }) + .unwrap_add_cost(&mut cost); + match element_res { Ok(element) => { let (subquery_path, subquery) = @@ -835,74 +823,74 @@ impl Element { } } else { // this is a query on a range - storage_context_optional_tx!(storage, subtree_path, None, transaction, ctx, { - let ctx = ctx.unwrap_add_cost(&mut cost); - let mut iter = ctx.raw_iter(); + let ctx = storage + .get_transactional_storage_context(subtree_path, None, tx.as_ref()) + .unwrap_add_cost(&mut cost); - 
item.seek_for_iter(&mut iter, sized_query.query.left_to_right) - .unwrap_add_cost(&mut cost); + let mut iter = ctx.raw_iter(); + + item.seek_for_iter(&mut iter, sized_query.query.left_to_right) + .unwrap_add_cost(&mut cost); - while item - .iter_is_valid_for_type(&iter, *limit, sized_query.query.left_to_right) + while item + .iter_is_valid_for_type(&iter, *limit, sized_query.query.left_to_right) + .unwrap_add_cost(&mut cost) + { + let element = cost_return_on_error_no_add!( + cost, + raw_decode( + iter.value() + .unwrap_add_cost(&mut cost) + .expect("if key exists then value should too"), + grove_version + ) + ); + let key = iter + .key() .unwrap_add_cost(&mut cost) - { - let element = cost_return_on_error_no_add!( - &cost, - raw_decode( - iter.value() - .unwrap_add_cost(&mut cost) - .expect("if key exists then value should too"), - grove_version - ) - ); - let key = iter - .key() - .unwrap_add_cost(&mut cost) - .expect("key should exist"); - let (subquery_path, subquery) = - Self::subquery_paths_and_value_for_sized_query(sized_query, key); - let result_with_cost = add_element_function( - PathQueryPushArgs { - storage, - transaction, - key: Some(key), - element, - path, - subquery_path, - subquery, - left_to_right: sized_query.query.left_to_right, - query_options, - result_type, - results, - limit, - offset, - }, - grove_version, - ); - let result = result_with_cost.unwrap_add_cost(&mut cost); - match result { - Ok(x) => x, - Err(e) => { - if !query_options.error_if_intermediate_path_tree_not_present { - match e { - Error::PathKeyNotFound(_) - | Error::PathParentLayerNotFound(_) => (), - _ => return Err(e).wrap_with_cost(cost), - } - } else { - return Err(e).wrap_with_cost(cost); + .expect("key should exist"); + let (subquery_path, subquery) = + Self::subquery_paths_and_value_for_sized_query(sized_query, key); + let result_with_cost = add_element_function( + PathQueryPushArgs { + storage, + transaction, + key: Some(key), + element, + path, + subquery_path, + 
subquery, + left_to_right: sized_query.query.left_to_right, + query_options, + result_type, + results, + limit, + offset, + }, + grove_version, + ); + let result = result_with_cost.unwrap_add_cost(&mut cost); + match result { + Ok(x) => x, + Err(e) => { + if !query_options.error_if_intermediate_path_tree_not_present { + match e { + Error::PathKeyNotFound(_) | Error::PathParentLayerNotFound(_) => (), + _ => return Err(e).wrap_with_cost(cost), } + } else { + return Err(e).wrap_with_cost(cost); } } - if sized_query.query.left_to_right { - iter.next().unwrap_add_cost(&mut cost); - } else { - iter.prev().unwrap_add_cost(&mut cost); - } - cost.seek_count += 1; } - Ok(()) - }) + if sized_query.query.left_to_right { + iter.next().unwrap_add_cost(&mut cost); + } else { + iter.prev().unwrap_add_cost(&mut cost); + } + cost.seek_count += 1; + } + Ok(()) } .wrap_with_cost(cost) } @@ -1210,9 +1198,12 @@ mod tests { let batch = StorageBatch::new(); let storage = &db.db; + let transaction = db.start_transaction(); + let mut merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1241,6 +1232,8 @@ mod tests { .unwrap() .expect("expected successful batch commit"); + transaction.commit().unwrap(); + // Test range inclusive query let mut query = Query::new(); query.insert_range(b"a".to_vec()..b"d".to_vec()); @@ -1322,9 +1315,12 @@ mod tests { let batch = StorageBatch::new(); let storage = &db.db; + let transaction = db.start_transaction(); + let mut merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1353,6 +1349,8 @@ mod tests { .unwrap() .expect("expected successful batch commit"); + transaction.commit().unwrap(); + // Test range inclusive query let mut query = Query::new_with_direction(true); query.insert_range_inclusive(b"a".to_vec()..=b"d".to_vec()); @@ 
-1724,7 +1722,7 @@ impl ElementsIterator { .unwrap_add_cost(&mut cost) .zip(self.raw_iter.value().unwrap_add_cost(&mut cost)) { - let element = cost_return_on_error_no_add!(&cost, raw_decode(value, grove_version)); + let element = cost_return_on_error_no_add!(cost, raw_decode(value, grove_version)); let key_vec = key.to_vec(); self.raw_iter.next().unwrap_add_cost(&mut cost); Some((key_vec, element)) diff --git a/grovedb/src/element/serialize.rs b/grovedb/src/element/serialize.rs index d0974cc9..d65a0670 100644 --- a/grovedb/src/element/serialize.rs +++ b/grovedb/src/element/serialize.rs @@ -2,7 +2,7 @@ //! Implements serialization functions in Element use bincode::config; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::{Element, Error}; diff --git a/grovedb/src/estimated_costs/average_case_costs.rs b/grovedb/src/estimated_costs/average_case_costs.rs index e779d9b8..fc0da61a 100644 --- a/grovedb/src/estimated_costs/average_case_costs.rs +++ b/grovedb/src/estimated_costs/average_case_costs.rs @@ -118,7 +118,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -154,7 +154,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -235,7 +235,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let flags_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .layered_flags_size() @@ -288,7 +288,7 @@ impl GroveDb { add_cost_case_merk_insert( 
&mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_tree_type, ) } @@ -344,7 +344,7 @@ impl GroveDb { let sum_item_cost_size = if value.is_sum_item() { SUM_ITEM_COST_SIZE } else { - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32 + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32 }; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_replace_same_size(&mut cost, key_len, value_len, in_tree_type) @@ -352,7 +352,7 @@ impl GroveDb { _ => add_cost_case_merk_replace_same_size( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_tree_type, ), }; @@ -395,8 +395,7 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let item_cost_size = - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) - as u32; + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32; let value_len = item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -439,7 +438,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let key_len = key.max_length() as u32; let value_size = cost_return_on_error_no_add!( - &cost, + cost, estimated_layer_information .estimated_layer_sizes .value_with_feature_and_flags_size(grove_version) @@ -638,10 +637,11 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); let mut merk = Merk::open_base( storage - .get_storage_context(EMPTY_PATH, Some(&batch)) + .get_transactional_storage_context(EMPTY_PATH, Some(&batch), &transaction) .unwrap(), TreeType::NormalTree, 
None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -656,13 +656,15 @@ mod test { // this consumes the batch so storage contexts and merks will be dropped storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); // Reopen merk: this time, only root node is loaded to memory let merk = Merk::open_base( - storage.get_storage_context(EMPTY_PATH, None).unwrap(), + storage + .get_transactional_storage_context(EMPTY_PATH, None, &transaction) + .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, grove_version, diff --git a/grovedb/src/estimated_costs/worst_case_costs.rs b/grovedb/src/estimated_costs/worst_case_costs.rs index c6f889f9..c5287c14 100644 --- a/grovedb/src/estimated_costs/worst_case_costs.rs +++ b/grovedb/src/estimated_costs/worst_case_costs.rs @@ -19,9 +19,7 @@ use grovedb_merk::{ HASH_LENGTH, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; use integer_encoding::VarInt; use crate::{ @@ -209,7 +207,7 @@ impl GroveDb { _ => add_cost_case_merk_insert( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_parent_tree_type, ), }; @@ -277,7 +275,7 @@ impl GroveDb { _ => add_cost_case_merk_replace( &mut cost, key_len, - cost_return_on_error_no_add!(&cost, value.serialized_size(grove_version)) as u32, + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32, in_parent_tree_type, ), }; @@ -319,8 +317,7 @@ impl GroveDb { }); // Items need to be always the same serialized size for this to work let sum_item_cost_size = - cost_return_on_error_no_add!(&cost, 
value.serialized_size(grove_version)) - as u32; + cost_return_on_error_no_add!(cost, value.serialized_size(grove_version)) as u32; let value_len = sum_item_cost_size + flags_len; add_cost_case_merk_patch( &mut cost, @@ -522,7 +519,9 @@ mod test { // Open a merk and insert 10 elements. let storage = TempStorage::new(); let batch = StorageBatch::new(); - let mut merk = empty_path_merk(&*storage, &batch, grove_version); + let transaction = storage.start_transaction(); + + let mut merk = empty_path_merk(&*storage, &transaction, &batch, grove_version); let merk_batch = make_batch_seq(1..10); merk.apply::<_, Vec<_>>(merk_batch.as_slice(), &[], None, grove_version) @@ -531,12 +530,12 @@ mod test { // this consumes the batch so storage contexts and merks will be dropped storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .unwrap(); // Reopen merk: this time, only root node is loaded to memory - let merk = empty_path_merk_read_only(&*storage, grove_version); + let merk = empty_path_merk_read_only(&*storage, &transaction, grove_version); // To simulate worst case, we need to pick a node that: // 1. 
Is not in memory diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 7b4e9c00..85a0189c 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -135,6 +135,8 @@ pub mod element; pub mod error; #[cfg(feature = "estimated_costs")] mod estimated_costs; +#[cfg(feature = "minimal")] +mod merk_cache; #[cfg(any(feature = "minimal", feature = "verify"))] pub mod operations; #[cfg(any(feature = "minimal", feature = "verify"))] @@ -197,10 +199,7 @@ use grovedb_storage::rocksdb_storage::PrefixedRocksDbImmediateStorageContext; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; #[cfg(feature = "minimal")] -use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - StorageBatch, -}; +use grovedb_storage::{rocksdb_storage::PrefixedRocksDbTransactionContext, StorageBatch}; #[cfg(feature = "minimal")] use grovedb_storage::{Storage, StorageContext}; #[cfg(feature = "minimal")] @@ -213,6 +212,8 @@ pub use query::{PathQuery, SizedQuery}; use reference_path::path_from_reference_path_type; #[cfg(feature = "grovedbg")] use tokio::net::ToSocketAddrs; +#[cfg(feature = "minimal")] +use util::{compat, TxRef}; #[cfg(feature = "minimal")] use crate::element::helpers::raw_decode; @@ -221,8 +222,6 @@ pub use crate::error::Error; #[cfg(feature = "minimal")] use crate::operations::proof::util::hex_to_ascii; #[cfg(feature = "minimal")] -use crate::util::{root_merk_optional_tx, storage_context_optional_tx}; -#[cfg(feature = "minimal")] use crate::Error::MerkError; #[cfg(feature = "minimal")] @@ -298,58 +297,32 @@ impl GroveDb { where B: AsRef<[u8]> + 'b, { - let mut cost = OperationCost::default(); - - let storage = self - .db - .get_transactional_storage_context(path.clone(), batch, tx) - .unwrap_add_cost(&mut cost); - if let Some((parent_path, parent_key)) = path.derive_parent() { - let parent_storage = self - .db - .get_transactional_storage_context(parent_path.clone(), batch, tx) - 
.unwrap_add_cost(&mut cost); - let element = cost_return_on_error!( - &mut cost, - Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( - |e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - } - ) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) - } else { - Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree".to_string(), + struct Compat; + + impl compat::OpenMerkErrorsCompat for Compat { + fn parent_key_not_found>( + e: Error, + parent_path: SubtreePath, + parent_key: &[u8], + ) -> Error { + Error::InvalidParentLayerPath(format!( + "could not get key {} for parent {:?} of subtree: {}", + hex::encode(parent_key), + DebugByteVectors(parent_path.to_vec()), + e )) - .wrap_with_cost(cost) } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) + + fn open_base_error() -> Error { + Error::CorruptedData("cannot open a the root subtree".to_owned()) + } + + fn parent_must_be_tree() -> Error { + Error::CorruptedData("cannot open a subtree with given root key".to_owned()) + } } + + compat::open_merk::<_, Compat>(&self.db, path, tx, batch, grove_version) } fn open_transactional_merk_by_prefix<'db>( @@ -461,110 +434,6 @@ impl GroveDb { } } - /// Opens the non-transactional Merk at the given path. Returns CostResult. 
- fn open_non_transactional_merk_at_path<'db, 'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - grove_version: &GroveVersion, - ) -> CostResult>, Error> - where - B: AsRef<[u8]> + 'b, - { - let mut cost = OperationCost::default(); - - let storage = self - .db - .get_storage_context(path.clone(), batch) - .unwrap_add_cost(&mut cost); - - if let Some((parent_path, parent_key)) = path.derive_parent() { - let parent_storage = self - .db - .get_storage_context(parent_path.clone(), batch) - .unwrap_add_cost(&mut cost); - let element = cost_return_on_error!( - &mut cost, - Element::get_from_storage(&parent_storage, parent_key, grove_version).map_err( - |e| { - Error::InvalidParentLayerPath(format!( - "could not get key {} for parent {:?} of subtree: {}", - hex::encode(parent_key), - DebugByteVectors(parent_path.to_vec()), - e - )) - } - ) - ); - if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData("cannot open a subtree with given root key".to_owned()) - }) - .add_cost(cost) - } else { - Err(Error::CorruptedPath( - "cannot open a subtree as parent exists but is not a tree".to_string(), - )) - .wrap_with_cost(cost) - } - } else { - Merk::open_base( - storage, - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a the root subtree".to_owned())) - .add_cost(cost) - } - } - - fn open_non_transactional_merk_by_prefix<'db>( - &'db self, - prefix: SubtreePrefix, - root_key: Option>, - tree_type: TreeType, - batch: Option<&'db StorageBatch>, - grove_version: &GroveVersion, - ) -> CostResult>, Error> { - let mut cost = OperationCost::default(); - let storage = self - .db - .get_storage_context_by_subtree_prefix(prefix, batch) - 
.unwrap_add_cost(&mut cost); - if root_key.is_some() { - Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| { - Error::CorruptedData( - "cannot open a subtree by prefix with given root key".to_owned(), - ) - }) - .add_cost(cost) - } else { - Merk::open_base( - storage, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version, - ) - .map_err(|_| Error::CorruptedData("cannot open a root subtree by prefix".to_owned())) - .add_cost(cost) - } - } - /// Creates a checkpoint pub fn create_checkpoint>(&self, path: P) -> Result<(), Error> { self.db.create_checkpoint(path).map_err(|e| e.into()) @@ -576,27 +445,21 @@ impl GroveDb { &self, transaction: TransactionArg, grove_version: &GroveVersion, - ) -> CostResult, Error> { + ) -> CostResult>, Error> { let mut cost = OperationCost { ..Default::default() }; - root_merk_optional_tx!( - &mut cost, - self.db, - None, - transaction, - subtree, - grove_version, - { - let root_key = subtree.root_key().unwrap(); - Ok(root_key).wrap_with_cost(cost) - } - ) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = + cost_return_on_error!(&mut cost, self.open_root_merk(tx.as_ref(), grove_version)); + + let root_key = root_merk.root_key(); + Ok(root_key).wrap_with_cost(cost) } /// Returns root hash of GroveDb. - /// Will be `None` if GroveDb is empty. 
pub fn root_hash( &self, transaction: TransactionArg, @@ -606,18 +469,34 @@ impl GroveDb { ..Default::default() }; - root_merk_optional_tx!( - &mut cost, - self.db, - None, - transaction, - subtree, - grove_version, - { - let root_hash = subtree.root_hash().unwrap_add_cost(&mut cost); - Ok(root_hash).wrap_with_cost(cost) - } - ) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = + cost_return_on_error!(&mut cost, self.open_root_merk(tx.as_ref(), grove_version)); + + root_merk.root_hash().map(Ok).add_cost(cost) + } + + fn open_root_merk<'tx, 'db>( + &'db self, + tx: &'tx Transaction<'db>, + grove_version: &GroveVersion, + ) -> CostResult>, Error> { + self.db + .get_transactional_storage_context(SubtreePath::empty(), None, tx) + .flat_map(|storage_ctx| { + grovedb_merk::Merk::open_base( + storage_ctx, + TreeType::NormalTree, + Some(Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map(|merk_res| { + merk_res.map_err(|_| { + crate::Error::CorruptedData("cannot open a subtree".to_owned()) + }) + }) + }) } /// Method to propagate updated subtree key changes one level up inside a @@ -633,7 +512,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let mut child_tree = cost_return_on_error_no_add!( - &cost, + cost, merk_cache.remove(path).ok_or(Error::CorruptedCodeExecution( "Merk Cache should always contain the last path", )) @@ -688,7 +567,7 @@ impl GroveDb { let mut cost = OperationCost::default(); let mut child_tree = cost_return_on_error_no_add!( - &cost, + cost, merk_cache .remove(&path) .ok_or(Error::CorruptedCodeExecution( @@ -731,59 +610,6 @@ impl GroveDb { Ok(()).wrap_with_cost(cost) } - /// Method to propagate updated subtree key changes one level up - fn propagate_changes_without_transaction<'b, B: AsRef<[u8]>>( - &self, - mut merk_cache: HashMap, Merk>, - path: SubtreePath<'b, B>, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult<(), Error> { - let mut cost = OperationCost::default(); - - 
let mut child_tree = cost_return_on_error_no_add!( - &cost, - merk_cache - .remove(&path) - .ok_or(Error::CorruptedCodeExecution( - "Merk Cache should always contain the last path", - )) - ); - - let mut current_path: SubtreePath = path; - - while let Some((parent_path, parent_key)) = current_path.derive_parent() { - let mut parent_tree: Merk = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - parent_path.clone(), - Some(batch), - grove_version - ) - ); - let (root_hash, root_key, sum) = cost_return_on_error!( - &mut cost, - child_tree - .root_hash_key_and_aggregate_data() - .map_err(Error::MerkError) - ); - cost_return_on_error!( - &mut cost, - Self::update_tree_item_preserve_flag( - &mut parent_tree, - parent_key, - root_key, - root_hash, - sum, - grove_version, - ) - ); - child_tree = parent_tree; - current_path = parent_path; - } - Ok(()).wrap_with_cost(cost) - } - /// Updates a tree item and preserves flags. Returns CostResult. pub(crate) fn update_tree_item_preserve_flag<'db, K: AsRef<[u8]>, S: StorageContext<'db>>( parent_tree: &mut Merk, @@ -1147,186 +973,20 @@ impl GroveDb { allow_cache: bool, grove_version: &GroveVersion, ) -> Result { - if let Some(transaction) = transaction { - let root_merk = self - .open_transactional_merk_at_path( - SubtreePath::empty(), - transaction, - None, - grove_version, - ) - .unwrap()?; - self.verify_merk_and_submerks_in_transaction( - root_merk, - &SubtreePath::empty(), - None, - transaction, - verify_references, - allow_cache, - grove_version, - ) - } else { - let root_merk = self - .open_non_transactional_merk_at_path(SubtreePath::empty(), None, grove_version) - .unwrap()?; - self.verify_merk_and_submerks( - root_merk, - &SubtreePath::empty(), - None, - verify_references, - allow_cache, - grove_version, - ) - } - } - - /// Verifies that the root hash of the given merk and all submerks match - /// those of the merk and submerks at the given path. Returns any issues. 
- fn verify_merk_and_submerks<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( - &'db self, - merk: Merk, - path: &SubtreePath, - batch: Option<&'db StorageBatch>, - verify_references: bool, - allow_cache: bool, - grove_version: &GroveVersion, - ) -> Result { - let mut all_query = Query::new(); - all_query.insert_all(); - - let mut issues = HashMap::new(); - let mut element_iterator = KVIterator::new(merk.storage.raw_iter(), &all_query).unwrap(); - - while let Some((key, element_value)) = element_iterator.next_kv().unwrap() { - let element = raw_decode(&element_value, grove_version)?; - match element { - Element::SumTree(..) - | Element::Tree(..) - | Element::BigSumTree(..) - | Element::CountTree(..) - | Element::CountSumTree(..) => { - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for {}", - hex_to_ascii(&key), - element.type_str() - )))?; - let new_path = path.derive_owned_with_child(key); - let new_path_ref = SubtreePath::from(&new_path); - - let inner_merk = self - .open_non_transactional_merk_at_path( - new_path_ref.clone(), - batch, - grove_version, - ) - .unwrap()?; - let root_hash = inner_merk.root_hash().unwrap(); - - let actual_value_hash = value_hash(&kv_value).unwrap(); - let combined_value_hash = combine_hash(&actual_value_hash, &root_hash).unwrap(); - - if combined_value_hash != element_value_hash { - issues.insert( - new_path.to_vec(), - (root_hash, combined_value_hash, element_value_hash), - ); - } - issues.extend(self.verify_merk_and_submerks( - inner_merk, - &new_path_ref, - batch, - verify_references, - true, - grove_version, - )?); - } - Element::Item(..) | Element::SumItem(..) 
=> { - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for {}", - hex_to_ascii(&key), - element.type_str() - )))?; - let actual_value_hash = value_hash(&kv_value).unwrap(); - if actual_value_hash != element_value_hash { - issues.insert( - path.derive_owned_with_child(key).to_vec(), - (actual_value_hash, element_value_hash, actual_value_hash), - ); - } - } - Element::Reference(ref reference_path, ..) => { - // Skip this whole check if we don't `verify_references` - if !verify_references { - continue; - } - - // Merk we're checking: - let (kv_value, element_value_hash) = merk - .get_value_and_value_hash( - &key, - allow_cache, - None::<&fn(&[u8], &GroveVersion) -> Option>, - grove_version, - ) - .unwrap() - .map_err(MerkError)? - .ok_or(Error::CorruptedData(format!( - "expected merk to contain value at key {} for reference", - hex_to_ascii(&key) - )))?; - - let referenced_value_hash = { - let full_path = path_from_reference_path_type( - reference_path.clone(), - &path.to_vec(), - Some(&key), - )?; - let item = self - .follow_reference( - (full_path.as_slice()).into(), - allow_cache, - None, - grove_version, - ) - .unwrap()?; - item.value_hash(grove_version).unwrap()? 
- }; - - // Take the current item (reference) hash and combine it with referenced value's - // hash - - let self_actual_value_hash = value_hash(&kv_value).unwrap(); - let combined_value_hash = - combine_hash(&self_actual_value_hash, &referenced_value_hash).unwrap(); - - if combined_value_hash != element_value_hash { - issues.insert( - path.derive_owned_with_child(key).to_vec(), - (combined_value_hash, element_value_hash, combined_value_hash), - ); - } - } - } - } - Ok(issues) + let tx = TxRef::new(&self.db, transaction); + + let root_merk = self + .open_transactional_merk_at_path(SubtreePath::empty(), tx.as_ref(), None, grove_version) + .unwrap()?; + self.verify_merk_and_submerks_in_transaction( + root_merk, + &SubtreePath::empty(), + None, + tx.as_ref(), + verify_references, + allow_cache, + grove_version, + ) } fn verify_merk_and_submerks_in_transaction<'db, B: AsRef<[u8]>, S: StorageContext<'db>>( diff --git a/grovedb/src/merk_cache.rs b/grovedb/src/merk_cache.rs new file mode 100644 index 00000000..2b737f14 --- /dev/null +++ b/grovedb/src/merk_cache.rs @@ -0,0 +1,271 @@ +//! Module dedicated to keep necessary Merks in memory. + +use std::{ + cell::{Cell, UnsafeCell}, + collections::{btree_map::Entry, BTreeMap}, +}; + +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt}; +use grovedb_merk::Merk; +use grovedb_path::SubtreePathBuilder; +use grovedb_storage::{rocksdb_storage::PrefixedRocksDbTransactionContext, StorageBatch}; +use grovedb_version::version::GroveVersion; + +use crate::{Error, GroveDb, Transaction}; + +type TxMerk<'db> = Merk>; + +/// We store Merk on heap to preserve its location as well as borrow flag +/// alongside. +type CachedMerkEntry<'db> = Box<(Cell, TxMerk<'db>)>; + +/// Structure to keep subtrees open in memory for repeated access. 
+pub(crate) struct MerkCache<'db, 'b, B: AsRef<[u8]>> { + db: &'db GroveDb, + pub(crate) version: &'db GroveVersion, + batch: Box, + tx: &'db Transaction<'db>, + merks: UnsafeCell, CachedMerkEntry<'db>>>, +} + +impl<'db, 'b, B: AsRef<[u8]>> MerkCache<'db, 'b, B> { + /// Initialize a new `MerkCache` instance + pub(crate) fn new( + db: &'db GroveDb, + tx: &'db Transaction<'db>, + version: &'db GroveVersion, + ) -> Self { + MerkCache { + db, + tx, + version, + merks: Default::default(), + batch: Default::default(), + } + } + + /// Gets a smart pointer to a cached Merk or opens one if needed. + pub(crate) fn get_merk<'c>( + &'c self, + path: SubtreePathBuilder<'b, B>, + ) -> CostResult, Error> { + let mut cost = Default::default(); + + // SAFETY: there are no other references to `merks` memory at the same time. + // Note while it's possible to have direct references to actual Merk trees, + // outside of the scope of this function, this map (`merks`) has + // indirect connection to them through `Box`, thus there are no overlapping + // references, and that is requirement of `UnsafeCell` we have there. + let boxed_flag_merk = match unsafe { + self.merks + .get() + .as_mut() + .expect("`UnsafeCell` is never null") + } + .entry(path) + { + Entry::Vacant(e) => { + let merk = cost_return_on_error!( + &mut cost, + self.db.open_transactional_merk_at_path( + e.key().into(), + self.tx, + // SAFETY: batch is allocated on the heap and we use only shared + // references, so as long as the `Box` allocation + // outlives those references we're safe, + // and it will outlive because Merks are dropped first. 
+ Some(unsafe { + (&*self.batch as *const StorageBatch) + .as_ref() + .expect("`Box` is never null") + }), + self.version + ) + ); + e.insert(Box::new((false.into(), merk))) + } + Entry::Occupied(e) => e.into_mut(), + }; + + let taken_handle_ref: *const Cell = &boxed_flag_merk.0 as *const _; + let merk_ptr: *mut TxMerk<'db> = &mut boxed_flag_merk.1 as *mut _; + + // SAFETY: `MerkHandle` contains two references to the heap allocated memory, + // and we want to be sure that the referenced data will outlive those + // references plus borrowing rules aren't violated (one `&mut` or many + // `&` with no `&mut` at a time). + // + // To make sure changes to the map won't affect existing borrows we have an + // indirection in a form of `Box`, that allows us to move and update + // `MerkCache` with new subtrees and possible reallocations without breaking + // `MerkHandle`'s references. We use `UnsafeCell` to connect lifetimes and check + // in compile time that `MerkHandle`s won't outlive the cache, even though we + // don't hold any references to it, but `&mut` reference would make this borrow + // exclusive for the whole time of `MerkHandle`, so it shall go intially through + // a shared reference. + // + // Borrowing rules are covered using a borrow flag of each Merk: + // 1. Borrow flag's reference points to a heap allocated memory and will remain + // valid. Since the reference is shared and no need to obtain a `&mut` + // reference this part of the memory is covered. + // 2. For the same reason the Merk's pointer can be converted to a reference, + // because the memory behind the `Box` is valid and `MerkHandle` can't + // outlive it since we use lifetime parameters. + // 3. We can get unique reference out of that pointer safely because of + // borrowing flag. 
+ Ok(unsafe { + MerkHandle { + merk: merk_ptr, + taken_handle: taken_handle_ref + .as_ref() + .expect("`Box` contents are never null"), + } + }) + .wrap_with_cost(cost) + } + + /// Consumes `MerkCache` into accumulated batch of uncommited operations + /// with subtrees' root hash propagation done. + pub(crate) fn into_batch(mut self) -> CostResult, Error> { + let mut cost = Default::default(); + cost_return_on_error!(&mut cost, self.propagate_subtrees()); + + // SAFETY: By this time all subtrees are taken and dropped during + // propagation, so there are no more references to the batch and in can be + // safely released into the world. + Ok(self.batch).wrap_with_cost(cost) + } + + fn propagate_subtrees(&mut self) -> CostResult<(), Error> { + let mut cost = Default::default(); + + // This relies on [SubtreePath]'s ordering implementation to put the deepest + // path's first. + while let Some((path, flag_and_merk)) = self.merks.get_mut().pop_first() { + let merk = flag_and_merk.1; + if let Some((parent_path, parent_key)) = path.derive_parent_owned() { + let mut parent_merk = cost_return_on_error!(&mut cost, self.get_merk(parent_path)); + + let (root_hash, root_key, aggregate_data) = cost_return_on_error!( + &mut cost, + merk.root_hash_key_and_aggregate_data() + .map_err(Error::MerkError) + ); + cost_return_on_error!( + &mut cost, + parent_merk.for_merk(|m| GroveDb::update_tree_item_preserve_flag( + m, + parent_key, + root_key, + root_hash, + aggregate_data, + self.version, + )) + ); + } + } + + Ok(()).wrap_with_cost(cost) + } +} + +/// Wrapper over `Merk` tree to manage unqiue borrow dynamically. 
+#[derive(Clone)] +pub(crate) struct MerkHandle<'db, 'c> { + merk: *mut TxMerk<'db>, + taken_handle: &'c Cell, +} + +impl<'db> MerkHandle<'db, '_> { + pub(crate) fn for_merk(&mut self, f: impl FnOnce(&mut TxMerk<'db>) -> T) -> T { + if self.taken_handle.get() { + panic!("Attempt to have double &mut borrow on Merk"); + } + + self.taken_handle.set(true); + + // SAFETY: here we want to have `&mut` reference to Merk out of a pointer, there + // is a checklist for that: + // 1. Memory is valid, because `MerkHandle` can't outlive `MerkCache` and heap + // allocated Merks stay at their place for the whole `MerkCache` lifetime. + // 2. No other references exist because of `taken_handle` check above. + let result = f(unsafe { self.merk.as_mut().expect("`Box` contents are never null") }); + + self.taken_handle.set(false); + + result + } +} + +#[cfg(test)] +mod tests { + use grovedb_path::SubtreePath; + use grovedb_storage::StorageBatch; + use grovedb_version::version::GroveVersion; + + use super::MerkCache; + use crate::{ + tests::{make_deep_tree, make_test_grovedb, TEST_LEAF}, + Element, + }; + + #[test] + #[should_panic] + fn cant_borrow_twice() { + let version = GroveVersion::latest(); + let db = make_test_grovedb(&version); + let tx = db.start_transaction(); + + let cache = MerkCache::new(&db, &tx, version); + + let mut merk1 = cache + .get_merk(SubtreePath::empty().derive_owned()) + .unwrap() + .unwrap(); + let mut merk2 = cache + .get_merk(SubtreePath::empty().derive_owned()) + .unwrap() + .unwrap(); + + merk1.for_merk(|_m1| { + merk2.for_merk(|_m2| { + // this shouldn't happen + }) + }); + } + + #[test] + fn subtrees_are_propagated() { + let version = GroveVersion::latest(); + let db = make_deep_tree(&version); + let tx = db.start_transaction(); + + let path = SubtreePath::from(&[TEST_LEAF, b"innertree"]); + let item = Element::new_item(b"hello".to_vec()); + + let no_propagation_ops_count = { + let batch = StorageBatch::new(); + + let mut merk = db + 
.open_transactional_merk_at_path(path.clone(), &tx, Some(&batch), &version) + .unwrap() + .unwrap(); + + item.insert(&mut merk, b"k1", None, &version) + .unwrap() + .unwrap(); + + batch.len() + }; + + let cache = MerkCache::new(&db, &tx, version); + + let mut merk = cache.get_merk(path.derive_owned()).unwrap().unwrap(); + + merk.for_merk(|m| item.insert(m, b"k1", None, &version).unwrap().unwrap()); + + drop(merk); + + assert!(cache.into_batch().unwrap().unwrap().len() > no_propagation_ops_count); + } +} diff --git a/grovedb/src/operations/auxiliary.rs b/grovedb/src/operations/auxiliary.rs index 493e6e74..85596023 100644 --- a/grovedb/src/operations/auxiliary.rs +++ b/grovedb/src/operations/auxiliary.rs @@ -28,22 +28,16 @@ //! Auxiliary operations -#[cfg(feature = "minimal")] use grovedb_costs::{ - cost_return_on_error, cost_return_on_error_no_add, - storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, OperationCost, + cost_return_on_error, storage_cost::key_value_cost::KeyValueStorageCost, CostResult, CostsExt, + OperationCost, }; use grovedb_path::SubtreePath; -#[cfg(feature = "minimal")] -use grovedb_storage::StorageContext; -use grovedb_storage::{Storage, StorageBatch}; +use grovedb_storage::{Storage, StorageContext}; use grovedb_version::version::GroveVersion; -use crate::util::storage_context_optional_tx; -#[cfg(feature = "minimal")] -use crate::{util::meta_storage_context_optional_tx, Element, Error, GroveDb, TransactionArg}; +use crate::{util::TxRef, Element, Error, GroveDb, TransactionArg}; -#[cfg(feature = "minimal")] impl GroveDb { /// Put op for aux storage pub fn put_aux>( @@ -54,23 +48,29 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - let batch = StorageBatch::new(); - - meta_storage_context_optional_tx!(self.db, Some(&batch), transaction, aux_storage, { - cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .put_aux(key.as_ref(), 
value, cost_info) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - }); - - self.db - .commit_multi_context_batch(batch, transaction) - .add_cost(cost) - .map_err(Into::into) + let tx = TxRef::new(&self.db, transaction); + let batch = Default::default(); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), tx.as_ref()) + .unwrap_add_cost(&mut cost); + + cost_return_on_error!( + &mut cost, + aux_storage + .put_aux(key.as_ref(), value, cost_info) + .map_err(Into::into) + ); + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, Some(tx.as_ref())) + .map_err(Into::into) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete op for aux storage @@ -81,23 +81,29 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); - let batch = StorageBatch::new(); - - meta_storage_context_optional_tx!(self.db, Some(&batch), transaction, aux_storage, { - cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .delete_aux(key.as_ref(), cost_info) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - }); - - self.db - .commit_multi_context_batch(batch, transaction) - .add_cost(cost) - .map_err(Into::into) + let tx = TxRef::new(&self.db, transaction); + let batch = Default::default(); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), tx.as_ref()) + .unwrap_add_cost(&mut cost); + + cost_return_on_error!( + &mut cost, + aux_storage + .delete_aux(key.as_ref(), cost_info) + .map_err(|e| e.into()) + ); + + cost_return_on_error!( + &mut cost, + self.db + .commit_multi_context_batch(batch, Some(tx.as_ref())) + .map_err(Into::into) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Get op for aux storage @@ -107,19 +113,17 @@ impl GroveDb { transaction: TransactionArg, ) -> CostResult>, Error> { let mut cost = OperationCost::default(); + 
let tx = TxRef::new(&self.db, transaction); + + let aux_storage = self + .db + .get_transactional_storage_context(SubtreePath::empty(), None, tx.as_ref()) + .unwrap_add_cost(&mut cost); - meta_storage_context_optional_tx!(self.db, None, transaction, aux_storage, { - let value = cost_return_on_error_no_add!( - &cost, - aux_storage - .unwrap_add_cost(&mut cost) - .get_aux(key) - .unwrap_add_cost(&mut cost) - .map_err(|e| e.into()) - ); - - Ok(value).wrap_with_cost(cost) - }) + aux_storage + .get_aux(key.as_ref()) + .map_err(|e| e.into()) + .add_cost(cost) } // TODO: dumb traversal should not be tolerated @@ -148,23 +152,26 @@ impl GroveDb { let mut queue: Vec>> = vec![path.to_vec()]; let mut result: Vec>> = queue.clone(); + let tx = TxRef::new(&self.db, transaction); + while let Some(q) = queue.pop() { let subtree_path: SubtreePath> = q.as_slice().into(); // Get the correct subtree with q_ref as path - storage_context_optional_tx!(self.db, subtree_path, None, transaction, storage, { - let storage = storage.unwrap_add_cost(&mut cost); - let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); - while let Some((key, value)) = - cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) - { - if value.is_any_tree() { - let mut sub_path = q.clone(); - sub_path.push(key.to_vec()); - queue.push(sub_path.clone()); - result.push(sub_path); - } + let storage = self + .db + .get_transactional_storage_context(subtree_path, None, tx.as_ref()) + .unwrap_add_cost(&mut cost); + let mut raw_iter = Element::iterator(storage.raw_iter()).unwrap_add_cost(&mut cost); + while let Some((key, value)) = + cost_return_on_error!(&mut cost, raw_iter.next_element(grove_version)) + { + if value.is_any_tree() { + let mut sub_path = q.clone(); + sub_path.push(key.to_vec()); + queue.push(sub_path.clone()); + result.push(sub_path); } - }) + } } Ok(result).wrap_with_cost(cost) } diff --git a/grovedb/src/operations/delete/average_case.rs 
b/grovedb/src/operations/delete/average_case.rs index 6e8b0158..cab1e182 100644 --- a/grovedb/src/operations/delete/average_case.rs +++ b/grovedb/src/operations/delete/average_case.rs @@ -12,9 +12,7 @@ use grovedb_merk::{ HASH_LENGTH_U32, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use intmap::IntMap; use crate::{ @@ -68,11 +66,11 @@ impl GroveDb { estimated_element_size, tree_type, ) = cost_return_on_error_no_add!( - &cost, + cost, if height == path_len - 1 { if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = cost_return_on_error_no_add!( - &cost, + cost, layer_info .estimated_layer_sizes .value_with_feature_and_flags_size(grove_version) @@ -97,7 +95,7 @@ impl GroveDb { used_path = smaller_path; if let Some(layer_info) = estimated_layer_info.get(height) { let estimated_value_len = cost_return_on_error_no_add!( - &cost, + cost, layer_info .estimated_layer_sizes .subtree_with_feature_and_flags_size(grove_version) @@ -159,7 +157,7 @@ impl GroveDb { if validate { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_merk_at_path::( &mut cost, path, @@ -171,7 +169,7 @@ impl GroveDb { } if check_if_tree { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_average_case_get_raw_cost::( &mut cost, path, diff --git a/grovedb/src/operations/delete/delete_up_tree.rs b/grovedb/src/operations/delete/delete_up_tree.rs index 2b167374..633b7abb 100644 --- a/grovedb/src/operations/delete/delete_up_tree.rs +++ b/grovedb/src/operations/delete/delete_up_tree.rs @@ -7,13 +7,11 @@ use grovedb_costs::{ }; use grovedb_merk::MaybeTree; use grovedb_path::SubtreePath; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use 
grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use crate::{ - batch::QualifiedGroveDbOp, operations::delete::DeleteOptions, ElementFlags, Error, GroveDb, - TransactionArg, + batch::QualifiedGroveDbOp, operations::delete::DeleteOptions, util::TxRef, ElementFlags, Error, + GroveDb, TransactionArg, }; #[cfg(feature = "minimal")] @@ -139,7 +137,7 @@ impl GroveDb { ); let ops = cost_return_on_error_no_add!( - &cost, + cost, if let Some(stop_path_height) = options.stop_path_height { maybe_ops.ok_or_else(|| { Error::DeleteUpTreeStopHeightMoreThanInitialPathSize(format!( @@ -223,10 +221,13 @@ impl GroveDb { return Ok(None).wrap_with_cost(cost); } } + + let tx = TxRef::new(&self.db, transaction); + if options.validate_tree_at_path_exists { cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) + self.check_subtree_exists_path_not_found(path.clone(), tx.as_ref(), grove_version) ); } if let Some(delete_operation_this_level) = cost_return_on_error!( @@ -237,7 +238,7 @@ impl GroveDb { &options.to_delete_options(), is_known_to_be_subtree, current_batch_operations, - transaction, + Some(tx.as_ref()), grove_version, ) ) { diff --git a/grovedb/src/operations/delete/mod.rs b/grovedb/src/operations/delete/mod.rs index 8bf2e0ea..667a7ad8 100644 --- a/grovedb/src/operations/delete/mod.rs +++ b/grovedb/src/operations/delete/mod.rs @@ -25,20 +25,19 @@ use grovedb_merk::{Error as MerkError, Merk, MerkOptions}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext}, - Storage, StorageBatch, StorageContext, -}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, + rocksdb_storage::PrefixedRocksDbTransactionContext, Storage, StorageBatch, StorageContext, }; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature 
= "minimal")] use crate::{ batch::{GroveOp, QualifiedGroveDbOp}, - util::storage_context_with_parent_optional_tx, Element, ElementFlags, Error, GroveDb, Transaction, TransactionArg, }; -use crate::{raw_decode, util::merk_optional_tx_path_not_empty}; +use crate::{ + raw_decode, + util::{compat, TxRef}, +}; #[cfg(feature = "minimal")] #[derive(Clone)] @@ -119,15 +118,20 @@ impl GroveDb { grove_version.grovedb_versions.operations.delete.delete ); + let tx = TxRef::new(&self.db, transaction); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); - let collect_costs = self - .delete_internal( + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, + self.delete_internal_on_transaction( path.into(), key, &options, - transaction, + tx.as_ref(), &mut |_, removed_key_bytes, removed_value_bytes| { Ok(( BasicStorageRemoval(removed_key_bytes), @@ -137,13 +141,17 @@ impl GroveDb { &batch, grove_version, ) - .map_ok(|_| ()); + .map_ok(|_| ()) + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete all elements in a specified subtree @@ -187,120 +195,52 @@ impl GroveDb { .clear_subtree ); + let tx = TxRef::new(&self.db, transaction); + let subtree_path: SubtreePath = path.into(); let mut cost = OperationCost::default(); let batch = StorageBatch::new(); let options = options.unwrap_or_default(); - if let Some(transaction) = transaction { - let mut merk_to_clear = cost_return_on_error!( - &mut cost, - self.open_transactional_merk_at_path( - subtree_path.clone(), - transaction, - Some(&batch), - grove_version, - ) - ); - - if options.check_for_subtrees { - let mut all_query = Query::new(); - all_query.insert_all(); - - let mut element_iterator = - KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); - 
- // delete all nested subtrees - while let Some((key, element_value)) = - element_iterator.next_kv().unwrap_add_cost(&mut cost) - { - let element = raw_decode(&element_value, grove_version).unwrap(); - if element.is_any_tree() { - if options.allow_deleting_subtrees { - cost_return_on_error!( - &mut cost, - self.delete( - subtree_path.clone(), - key.as_slice(), - Some(DeleteOptions { - allow_deleting_non_empty_trees: true, - deleting_non_empty_trees_returns_error: false, - ..Default::default() - }), - Some(transaction), - grove_version, - ) - ); - } else if options.trying_to_clear_with_subtrees_returns_error { - return Err(Error::ClearingTreeWithSubtreesNotAllowed( - "options do not allow to clear this merk tree as it contains \ - subtrees", - )) - .wrap_with_cost(cost); - } else { - return Ok(false).wrap_with_cost(cost); - } - } - } - } - - // delete non subtree values - cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); - - // propagate changes - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - merk_cache.insert(subtree_path.clone(), merk_to_clear); - cost_return_on_error!( - &mut cost, - self.propagate_changes_with_transaction( - merk_cache, - subtree_path.clone(), - transaction, - &batch, - grove_version, - ) - ); - } else { - let mut merk_to_clear = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - subtree_path.clone(), - Some(&batch), - grove_version - ) - ); + let mut merk_to_clear = cost_return_on_error!( + &mut cost, + self.open_transactional_merk_at_path( + subtree_path.clone(), + tx.as_ref(), + Some(&batch), + grove_version, + ) + ); - if options.check_for_subtrees { - let mut all_query = Query::new(); - all_query.insert_all(); + if options.check_for_subtrees { + let mut all_query = Query::new(); + all_query.insert_all(); - let mut element_iterator = - KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); + let mut element_iterator = + 
KVIterator::new(merk_to_clear.storage.raw_iter(), &all_query).unwrap(); - // delete all nested subtrees - while let Some((key, element_value)) = - element_iterator.next_kv().unwrap_add_cost(&mut cost) - { - let element = raw_decode(&element_value, grove_version).unwrap(); + // delete all nested subtrees + while let Some((key, element_value)) = + element_iterator.next_kv().unwrap_add_cost(&mut cost) + { + let element = raw_decode(&element_value, grove_version).unwrap(); + if element.is_any_tree() { if options.allow_deleting_subtrees { - if element.is_any_tree() { - cost_return_on_error!( - &mut cost, - self.delete( - subtree_path.clone(), - key.as_slice(), - Some(DeleteOptions { - allow_deleting_non_empty_trees: true, - deleting_non_empty_trees_returns_error: false, - ..Default::default() - }), - None, - grove_version, - ) - ); - } + cost_return_on_error!( + &mut cost, + self.delete( + subtree_path.clone(), + key.as_slice(), + Some(DeleteOptions { + allow_deleting_non_empty_trees: true, + deleting_non_empty_trees_returns_error: false, + ..Default::default() + }), + Some(tx.as_ref()), + grove_version, + ) + ); } else if options.trying_to_clear_with_subtrees_returns_error { return Err(Error::ClearingTreeWithSubtreesNotAllowed( "options do not allow to clear this merk tree as it contains subtrees", @@ -311,33 +251,34 @@ impl GroveDb { } } } + } - // delete non subtree values - cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); + // delete non subtree values + cost_return_on_error!(&mut cost, merk_to_clear.clear().map_err(Error::MerkError)); - // propagate changes - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - merk_cache.insert(subtree_path.clone(), merk_to_clear); - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction( - merk_cache, - subtree_path.clone(), - &batch, - grove_version, - ) - ); - } + // propagate changes + let mut merk_cache: HashMap, Merk> = + HashMap::default(); + 
merk_cache.insert(subtree_path.clone(), merk_to_clear); + cost_return_on_error!( + &mut cost, + self.propagate_changes_with_transaction( + merk_cache, + subtree_path.clone(), + tx.as_ref(), + &batch, + grove_version, + ) + ); cost_return_on_error!( &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) ); - Ok(true).wrap_with_cost(cost) + tx.commit_local().map(|_| true).wrap_with_cost(cost) } /// Delete element with sectional storage function @@ -366,15 +307,20 @@ impl GroveDb { .delete_with_sectional_storage_function ); + let tx = TxRef::new(&self.db, transaction); + let options = options.unwrap_or_default(); let batch = StorageBatch::new(); - let collect_costs = self - .delete_internal( + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, + self.delete_internal_on_transaction( path, key, &options, - transaction, + tx.as_ref(), &mut |value, removed_key_bytes, removed_value_bytes| { let mut element = Element::deserialize(value.as_slice(), grove_version) .map_err(|e| MerkError::ClientCorruptionError(e.to_string()))?; @@ -395,13 +341,16 @@ impl GroveDb { &batch, grove_version, ) - .map_ok(|_| ()); + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } /// Delete if an empty tree @@ -425,28 +374,36 @@ impl GroveDb { .delete_if_empty_tree ); + let mut cost = Default::default(); + let batch = StorageBatch::new(); + let tx = TxRef::new(&self.db, transaction); - let collect_costs = self.delete_if_empty_tree_with_sectional_storage_function( - path.into(), - key, - transaction, - &mut |_, removed_key_bytes, removed_value_bytes| { - Ok(( - BasicStorageRemoval(removed_key_bytes), - BasicStorageRemoval(removed_value_bytes), - )) - }, - &batch, - 
grove_version, + let result = cost_return_on_error!( + &mut cost, + self.delete_if_empty_tree_with_sectional_storage_function( + path.into(), + key, + tx.as_ref(), + &mut |_, removed_key_bytes, removed_value_bytes| { + Ok(( + BasicStorageRemoval(removed_key_bytes), + BasicStorageRemoval(removed_value_bytes), + )) + }, + &batch, + grove_version, + ) ); - collect_costs.flat_map_ok(|r| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - .map_ok(|_| r) - }) + ); + + tx.commit_local().map(|_| result).wrap_with_cost(cost) } /// Delete if an empty tree with section storage function @@ -454,7 +411,7 @@ impl GroveDb { &self, path: SubtreePath, key: &[u8], - transaction: TransactionArg, + transaction: &Transaction, split_removal_bytes_function: &mut impl FnMut( &mut ElementFlags, u32, // key removed bytes @@ -481,7 +438,7 @@ impl GroveDb { ..Default::default() }; - self.delete_internal( + self.delete_internal_on_transaction( path, key, &options, @@ -526,6 +483,8 @@ impl GroveDb { .delete_operation_for_delete_internal ); + let tx = TxRef::new(&self.db, transaction); + let mut cost = OperationCost::default(); if path.is_root() { @@ -540,7 +499,7 @@ impl GroveDb { &mut cost, self.check_subtree_exists_path_not_found( path.clone(), - transaction, + tx.as_ref(), grove_version ) ); @@ -549,7 +508,7 @@ impl GroveDb { None => { let element = cost_return_on_error!( &mut cost, - self.get_raw(path.clone(), key.as_ref(), transaction, grove_version) + self.get_raw(path.clone(), key.as_ref(), Some(tx.as_ref()), grove_version) ); element.maybe_tree_type() } @@ -574,21 +533,21 @@ impl GroveDb { _ => None, }) .collect::>(); - let mut is_empty = merk_optional_tx_path_not_empty!( + let subtree = cost_return_on_error!( &mut cost, - self.db, - SubtreePath::from(&subtree_merk_path), - None, - transaction, - subtree, - grove_version, - { - subtree - 
.is_empty_tree_except(batch_deleted_keys) - .unwrap_add_cost(&mut cost) - } + compat::merk_optional_tx_path_not_empty( + &self.db, + SubtreePath::from(&subtree_merk_path), + tx.as_ref(), + None, + grove_version, + ) ); + let mut is_empty = subtree + .is_empty_tree_except(batch_deleted_keys) + .unwrap_add_cost(&mut cost); + // If there is any current batch operation that is inserting something in this // tree then it is not empty either is_empty &= !current_batch_operations.iter().any(|op| match op.op { @@ -628,45 +587,6 @@ impl GroveDb { } } - fn delete_internal>( - &self, - path: SubtreePath, - key: &[u8], - options: &DeleteOptions, - transaction: TransactionArg, - sectioned_removal: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - MerkError, - >, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult { - if let Some(transaction) = transaction { - self.delete_internal_on_transaction( - path, - key, - options, - transaction, - sectioned_removal, - batch, - grove_version, - ) - } else { - self.delete_internal_without_transaction( - path, - key, - options, - sectioned_removal, - batch, - grove_version, - ) - } - } - fn delete_internal_on_transaction>( &self, path: SubtreePath, @@ -864,131 +784,6 @@ impl GroveDb { Ok(true).wrap_with_cost(cost) } - - fn delete_internal_without_transaction>( - &self, - path: SubtreePath, - key: &[u8], - options: &DeleteOptions, - sectioned_removal: &mut impl FnMut( - &Vec, - u32, - u32, - ) -> Result< - (StorageRemovedBytes, StorageRemovedBytes), - MerkError, - >, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult { - check_grovedb_v0_with_cost!( - "delete_internal_without_transaction", - grove_version - .grovedb_versions - .operations - .delete - .delete_internal_without_transaction - ); - - let mut cost = OperationCost::default(); - - let element = cost_return_on_error!( - &mut cost, - self.get_raw(path.clone(), key.as_ref(), None, 
grove_version) - ); - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - let mut subtree_to_delete_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), Some(batch), grove_version) - ); - let uses_sum_tree = subtree_to_delete_from.tree_type; - if element.is_any_tree() { - let subtree_merk_path = path.derive_owned_with_child(key); - let subtree_of_tree_we_are_deleting = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path( - SubtreePath::from(&subtree_merk_path), - Some(batch), - grove_version, - ) - ); - let is_empty = subtree_of_tree_we_are_deleting - .is_empty_tree() - .unwrap_add_cost(&mut cost); - - if !options.allow_deleting_non_empty_trees && !is_empty { - return if options.deleting_non_empty_trees_returns_error { - Err(Error::DeletingNonEmptyTree( - "trying to do a delete operation for a non empty tree, but options not \ - allowing this", - )) - .wrap_with_cost(cost) - } else { - Ok(false).wrap_with_cost(cost) - }; - } else { - if !is_empty { - let subtrees_paths = cost_return_on_error!( - &mut cost, - self.find_subtrees( - &SubtreePath::from(&subtree_merk_path), - None, - grove_version - ) - ); - // TODO: dumb traversal should not be tolerated - for subtree_path in subtrees_paths.into_iter().rev() { - let p: SubtreePath<_> = subtree_path.as_slice().into(); - let mut inner_subtree_to_delete_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(p, Some(batch), grove_version) - ); - cost_return_on_error!( - &mut cost, - inner_subtree_to_delete_from.clear().map_err(|e| { - Error::CorruptedData(format!( - "unable to cleanup tree from storage: {e}", - )) - }) - ); - } - } - cost_return_on_error!( - &mut cost, - Element::delete_with_sectioned_removal_bytes( - &mut subtree_to_delete_from, - key, - Some(options.as_merk_options()), - true, - uses_sum_tree, - sectioned_removal, - grove_version, - ) - ); - } - } else { - cost_return_on_error!( - &mut cost, 
- Element::delete_with_sectioned_removal_bytes( - &mut subtree_to_delete_from, - key, - Some(options.as_merk_options()), - false, - uses_sum_tree, - sectioned_removal, - grove_version, - ) - ); - } - merk_cache.insert(path.clone(), subtree_to_delete_from); - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) - ); - - Ok(true).wrap_with_cost(cost) - } } #[cfg(feature = "minimal")] @@ -1882,9 +1677,13 @@ mod tests { .unwrap() .unwrap(); assert!(!matches!(key1_tree, Element::Tree(None, _))); + + let transaction = db.start_transaction(); + let key1_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, None, grove_version, ) @@ -1946,9 +1745,12 @@ mod tests { .unwrap(); assert!(matches!(key1_tree, Element::Tree(None, _))); + let transaction = db.start_transaction(); + let key1_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, None, grove_version, ) diff --git a/grovedb/src/operations/delete/worst_case.rs b/grovedb/src/operations/delete/worst_case.rs index 89454149..e5d8b563 100644 --- a/grovedb/src/operations/delete/worst_case.rs +++ b/grovedb/src/operations/delete/worst_case.rs @@ -8,9 +8,7 @@ use grovedb_merk::{ tree_type::TreeType, }; use grovedb_storage::{worst_case_costs::WorstKeyLength, Storage}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; use intmap::IntMap; use crate::{ @@ -62,7 +60,7 @@ impl GroveDb { max_element_size, tree_type, ) = cost_return_on_error_no_add!( - &cost, + cost, if height == path_len { if let Some((tree_type, _)) = intermediate_tree_info.get(height as u64) { Ok((used_path, key, true, 0, max_element_size, *tree_type)) @@ -134,7 +132,7 @@ impl GroveDb { if validate { 
cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_merk_at_path::( &mut cost, path, @@ -145,7 +143,7 @@ impl GroveDb { } if check_if_tree { cost_return_on_error_no_add!( - &cost, + cost, GroveDb::add_worst_case_get_raw_cost::( &mut cost, path, diff --git a/grovedb/src/operations/get/average_case.rs b/grovedb/src/operations/get/average_case.rs index d920ad01..c70e90b6 100644 --- a/grovedb/src/operations/get/average_case.rs +++ b/grovedb/src/operations/get/average_case.rs @@ -6,7 +6,7 @@ use grovedb_costs::OperationCost; use grovedb_merk::tree_type::TreeType; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use crate::Error; #[cfg(feature = "minimal")] diff --git a/grovedb/src/operations/get/mod.rs b/grovedb/src/operations/get/mod.rs index d52fe4b1..9eba55bc 100644 --- a/grovedb/src/operations/get/mod.rs +++ b/grovedb/src/operations/get/mod.rs @@ -4,6 +4,7 @@ mod average_case; #[cfg(feature = "minimal")] mod query; +use grovedb_storage::Storage; #[cfg(feature = "minimal")] pub use query::QueryItemOrSumReturnType; #[cfg(feature = "estimated_costs")] @@ -18,16 +19,14 @@ use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use crate::error::GroveDbErrorExt; +use crate::util::TxRef; #[cfg(feature = "minimal")] use crate::{ reference_path::{path_from_reference_path_type, path_from_reference_qualified_path_type}, - util::storage_context_optional_tx, Element, Error, GroveDb, Transaction, TransactionArg, }; @@ -213,17 +212,15 @@ impl 
GroveDb { .get_raw_caching_optional ); - if let Some(transaction) = transaction { - self.get_raw_on_transaction_caching_optional( - path, - key, - allow_cache, - transaction, - grove_version, - ) - } else { - self.get_raw_without_transaction_caching_optional(path, key, allow_cache, grove_version) - } + let tx = TxRef::new(&self.db, transaction); + + self.get_raw_on_transaction_caching_optional( + path, + key, + allow_cache, + tx.as_ref(), + grove_version, + ) } /// Get Element at specified path and key @@ -266,22 +263,15 @@ impl GroveDb { .get_raw_optional_caching_optional ); - if let Some(transaction) = transaction { - self.get_raw_optional_on_transaction_caching_optional( - path, - key, - allow_cache, - transaction, - grove_version, - ) - } else { - self.get_raw_optional_without_transaction_caching_optional( - path, - key, - allow_cache, - grove_version, - ) - } + let tx = TxRef::new(&self.db, transaction); + + self.get_raw_optional_on_transaction_caching_optional( + path, + key, + allow_cache, + tx.as_ref(), + grove_version, + ) } /// Get tree item without following references @@ -329,67 +319,7 @@ impl GroveDb { }) .unwrap_add_cost(&mut cost); let merk = cost_return_on_error_no_add!( - &cost, - match merk_result { - Ok(result) => Ok(Some(result)), - Err(Error::PathParentLayerNotFound(_)) | Err(Error::InvalidParentLayerPath(_)) => - Ok(None), - Err(e) => Err(e), - } - ); - - if let Some(merk_to_get_from) = merk { - Element::get_optional(&merk_to_get_from, key, allow_cache, grove_version).add_cost(cost) - } else { - Ok(None).wrap_with_cost(cost) - } - } - - /// Get tree item without following references - pub(crate) fn get_raw_without_transaction_caching_optional>( - &self, - path: SubtreePath, - key: &[u8], - allow_cache: bool, - grove_version: &GroveVersion, - ) -> CostResult { - let mut cost = OperationCost::default(); - - let merk_to_get_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.clone(), None, grove_version) - 
.map_err(|e| match e { - Error::InvalidParentLayerPath(s) => { - Error::PathParentLayerNotFound(s) - } - _ => e, - }) - ); - - Element::get(&merk_to_get_from, key, allow_cache, grove_version) - .add_context(format!("path is {}", path)) - .add_cost(cost) - } - - /// Get tree item without following references - pub(crate) fn get_raw_optional_without_transaction_caching_optional>( - &self, - path: SubtreePath, - key: &[u8], - allow_cache: bool, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - let mut cost = OperationCost::default(); - - let merk_result = self - .open_non_transactional_merk_at_path(path, None, grove_version) - .map_err(|e| match e { - Error::InvalidParentLayerPath(s) => Error::PathParentLayerNotFound(s), - _ => e, - }) - .unwrap_add_cost(&mut cost); - let merk = cost_return_on_error_no_add!( - &cost, + cost, match merk_result { Ok(result) => Ok(Some(result)), Err(Error::PathParentLayerNotFound(_)) | Err(Error::InvalidParentLayerPath(_)) => @@ -423,23 +353,25 @@ impl GroveDb { grove_version.grovedb_versions.operations.get.has_raw ); + let tx = TxRef::new(&self.db, transaction); + // Merk's items should be written into data storage and checked accordingly - storage_context_optional_tx!(self.db, path.into(), None, transaction, storage, { - storage.flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) - }) + self.db + .get_transactional_storage_context(path.into(), None, tx.as_ref()) + .flat_map(|s| s.get(key).map_err(|e| e.into()).map_ok(|x| x.is_some())) } fn check_subtree_exists>( &self, path: SubtreePath, - transaction: TransactionArg, + transaction: &Transaction, error_fn: impl FnOnce() -> Error, grove_version: &GroveVersion, ) -> CostResult<(), Error> { let mut cost = OperationCost::default(); if let Some((parent_path, parent_key)) = path.derive_parent() { - let element = if let Some(transaction) = transaction { + let element = { let merk_to_get_from = cost_return_on_error!( &mut cost, 
self.open_transactional_merk_at_path( @@ -450,14 +382,6 @@ impl GroveDb { ) ); - Element::get(&merk_to_get_from, parent_key, true, grove_version) - .add_context(format!("path is {}", path)) - } else { - let merk_to_get_from = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(parent_path, None, grove_version) - ); - Element::get(&merk_to_get_from, parent_key, true, grove_version) .add_context(format!("path is {}", path)) } @@ -480,7 +404,7 @@ impl GroveDb { pub(crate) fn check_subtree_exists_path_not_found<'b, B>( &self, path: SubtreePath<'b, B>, - transaction: TransactionArg, + transaction: &Transaction, grove_version: &GroveVersion, ) -> CostResult<(), Error> where @@ -518,9 +442,11 @@ impl GroveDb { .check_subtree_exists_invalid_path ); + let tx = TxRef::new(&self.db, transaction); + self.check_subtree_exists( path, - transaction, + tx.as_ref(), || Error::InvalidPath("subtree doesn't exist".to_owned()), grove_version, ) diff --git a/grovedb/src/operations/get/query.rs b/grovedb/src/operations/get/query.rs index c1b75468..d82c4f03 100644 --- a/grovedb/src/operations/get/query.rs +++ b/grovedb/src/operations/get/query.rs @@ -5,9 +5,7 @@ use grovedb_costs::cost_return_on_error_default; use grovedb_costs::{ cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt, OperationCost, }; -use grovedb_version::{ - check_grovedb_v0, check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0, check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = "minimal")] use integer_encoding::VarInt; @@ -140,7 +138,7 @@ where { let mut cost = OperationCost::default(); let query = cost_return_on_error_no_add!( - &cost, + cost, PathQuery::merge(path_queries.to_vec(), grove_version) ); let (result, _) = cost_return_on_error!( @@ -278,7 +276,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = 
cost_return_on_error_no_add!(cost, results_wrapped); Ok((QueryResultElements { elements: results }, skipped)).wrap_with_cost(cost) } @@ -368,7 +366,7 @@ where { }) .collect::>, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -488,7 +486,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -574,7 +572,7 @@ where { }) .collect::, Error>>(); - let results = cost_return_on_error_no_add!(&cost, results_wrapped); + let results = cost_return_on_error_no_add!(cost, results_wrapped); Ok((results, skipped)).wrap_with_cost(cost) } @@ -639,7 +637,7 @@ where { let mut cost = OperationCost::default(); let terminal_keys = cost_return_on_error_no_add!( - &cost, + cost, path_query.terminal_keys(max_results, grove_version) ); @@ -698,7 +696,7 @@ where { let mut cost = OperationCost::default(); let terminal_keys = cost_return_on_error_no_add!( - &cost, + cost, path_query.terminal_keys(max_results, grove_version) ); diff --git a/grovedb/src/operations/get/worst_case.rs b/grovedb/src/operations/get/worst_case.rs index e6382dd8..75fabb54 100644 --- a/grovedb/src/operations/get/worst_case.rs +++ b/grovedb/src/operations/get/worst_case.rs @@ -6,7 +6,7 @@ use grovedb_costs::OperationCost; use grovedb_merk::tree_type::TreeType; #[cfg(feature = "minimal")] use grovedb_storage::rocksdb_storage::RocksDbStorage; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use crate::Error; #[cfg(feature = "minimal")] diff --git a/grovedb/src/operations/insert/mod.rs b/grovedb/src/operations/insert/mod.rs index e362a761..e7df4282 100644 --- a/grovedb/src/operations/insert/mod.rs +++ 
b/grovedb/src/operations/insert/mod.rs @@ -11,14 +11,11 @@ use grovedb_costs::{ use grovedb_merk::{tree::NULL_HASH, Merk, MerkOptions}; use grovedb_path::SubtreePath; #[cfg(feature = "minimal")] -use grovedb_storage::rocksdb_storage::{ - PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, -}; +use grovedb_storage::rocksdb_storage::PrefixedRocksDbTransactionContext; use grovedb_storage::{Storage, StorageBatch}; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; +use crate::util::TxRef; #[cfg(feature = "minimal")] use crate::{ reference_path::path_from_reference_path_type, Element, Error, GroveDb, Transaction, @@ -86,32 +83,31 @@ impl GroveDb { let subtree_path: SubtreePath = path.into(); let batch = StorageBatch::new(); - let collect_costs = if let Some(transaction) = transaction { + let tx = TxRef::new(&self.db, transaction); + + let mut cost = Default::default(); + + cost_return_on_error!( + &mut cost, self.insert_on_transaction( subtree_path, key, element, options.unwrap_or_default(), - transaction, + tx.as_ref(), &batch, grove_version, ) - } else { - self.insert_without_transaction( - subtree_path, - key, - element, - options.unwrap_or_default(), - &batch, - grove_version, - ) - }; + ); - collect_costs.flat_map_ok(|_| { + cost_return_on_error!( + &mut cost, self.db - .commit_multi_context_batch(batch, transaction) + .commit_multi_context_batch(batch, Some(tx.as_ref())) .map_err(Into::into) - }) + ); + + tx.commit_local().wrap_with_cost(cost) } fn insert_on_transaction<'db, 'b, B: AsRef<[u8]>>( @@ -165,50 +161,6 @@ impl GroveDb { Ok(()).wrap_with_cost(cost) } - fn insert_without_transaction<'b, B: AsRef<[u8]>>( - &self, - path: SubtreePath<'b, B>, - key: &[u8], - element: Element, - options: InsertOptions, - batch: &StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult<(), Error> { - 
check_grovedb_v0_with_cost!( - "insert_without_transaction", - grove_version - .grovedb_versions - .operations - .insert - .insert_without_transaction - ); - - let mut cost = OperationCost::default(); - - let mut merk_cache: HashMap, Merk> = - HashMap::default(); - - let merk = cost_return_on_error!( - &mut cost, - self.add_element_without_transaction( - &path.to_vec(), - key, - element, - options, - batch, - grove_version - ) - ); - merk_cache.insert(path.clone(), merk); - - cost_return_on_error!( - &mut cost, - self.propagate_changes_without_transaction(merk_cache, path, batch, grove_version) - ); - - Ok(()).wrap_with_cost(cost) - } - /// Add subtree to another subtree. /// We want to add a new empty merk to another merk at a key /// first make sure other merk exist @@ -267,7 +219,7 @@ impl GroveDb { } if options.validate_insertion_does_not_override_tree { let element = cost_return_on_error_no_add!( - &cost, + cost, Element::deserialize(element_bytes.as_slice(), grove_version).map_err( |_| { Error::CorruptedData(String::from("unable to deserialize element")) @@ -355,142 +307,6 @@ impl GroveDb { Ok(subtree_to_insert_into).wrap_with_cost(cost) } - /// Add an empty tree or item to a parent tree. 
- /// We want to add a new empty merk to another merk at a key - /// first make sure other merk exist - /// if it exists, then create merk to be inserted, and get root hash - /// we only care about root hash of merk to be inserted - fn add_element_without_transaction<'db, B: AsRef<[u8]>>( - &'db self, - path: &[B], - key: &[u8], - element: Element, - options: InsertOptions, - batch: &'db StorageBatch, - grove_version: &GroveVersion, - ) -> CostResult, Error> { - check_grovedb_v0_with_cost!( - "add_element_without_transaction", - grove_version - .grovedb_versions - .operations - .insert - .add_element_without_transaction - ); - - let mut cost = OperationCost::default(); - let mut subtree_to_insert_into = cost_return_on_error!( - &mut cost, - self.open_non_transactional_merk_at_path(path.into(), Some(batch), grove_version) - ); - - if options.checks_for_override() { - let maybe_element_bytes = cost_return_on_error!( - &mut cost, - subtree_to_insert_into - .get( - key, - true, - Some(&Element::value_defined_cost_for_serialized_value), - grove_version - ) - .map_err(|e| Error::CorruptedData(e.to_string())) - ); - if let Some(element_bytes) = maybe_element_bytes { - if options.validate_insertion_does_not_override { - return Err(Error::OverrideNotAllowed( - "insertion not allowed to override", - )) - .wrap_with_cost(cost); - } - if options.validate_insertion_does_not_override_tree { - let element = cost_return_on_error_no_add!( - &cost, - Element::deserialize(element_bytes.as_slice(), grove_version).map_err( - |_| { - Error::CorruptedData(String::from("unable to deserialize element")) - } - ) - ); - if element.is_any_tree() { - return Err(Error::OverrideNotAllowed( - "insertion not allowed to override tree", - )) - .wrap_with_cost(cost); - } - } - } - } - - match element { - Element::Reference(ref reference_path, ..) 
=> { - let reference_path = cost_return_on_error!( - &mut cost, - path_from_reference_path_type(reference_path.clone(), path, Some(key)) - .wrap_with_cost(OperationCost::default()) - ); - let referenced_item = cost_return_on_error!( - &mut cost, - self.follow_reference( - reference_path.as_slice().into(), - false, - None, - grove_version - ) - ); - - let referenced_element_value_hash = - cost_return_on_error!(&mut cost, referenced_item.value_hash(grove_version)); - - cost_return_on_error!( - &mut cost, - element.insert_reference( - &mut subtree_to_insert_into, - key, - referenced_element_value_hash, - Some(options.as_merk_options()), - grove_version - ) - ); - } - Element::Tree(ref value, _) - | Element::SumTree(ref value, ..) - | Element::BigSumTree(ref value, ..) - | Element::CountTree(ref value, ..) => { - if value.is_some() { - return Err(Error::InvalidCodeExecution( - "a tree should be empty at the moment of insertion when not using batches", - )) - .wrap_with_cost(cost); - } else { - cost_return_on_error!( - &mut cost, - element.insert_subtree( - &mut subtree_to_insert_into, - key, - NULL_HASH, - Some(options.as_merk_options()), - grove_version - ) - ); - } - } - _ => { - cost_return_on_error!( - &mut cost, - element.insert( - &mut subtree_to_insert_into, - key, - Some(options.as_merk_options()), - grove_version - ) - ); - } - } - - Ok(subtree_to_insert_into).wrap_with_cost(cost) - } - /// Insert if not exists /// Insert if not exists /// diff --git a/grovedb/src/operations/is_empty_tree.rs b/grovedb/src/operations/is_empty_tree.rs index 4dec3abf..f1357fe3 100644 --- a/grovedb/src/operations/is_empty_tree.rs +++ b/grovedb/src/operations/is_empty_tree.rs @@ -1,17 +1,14 @@ //! 
Check if empty tree operations -#[cfg(feature = "minimal")] -use grovedb_costs::{cost_return_on_error, CostResult, CostsExt, OperationCost}; -use grovedb_merk::tree_type::TreeType; +use grovedb_costs::{cost_return_on_error, CostResult, OperationCost}; use grovedb_path::SubtreePath; -#[cfg(feature = "minimal")] -use grovedb_version::error::GroveVersionError; use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; -#[cfg(feature = "minimal")] -use crate::{util::merk_optional_tx, Element, Error, GroveDb, TransactionArg}; +use crate::{ + util::{compat, TxRef}, + Error, GroveDb, TransactionArg, +}; -#[cfg(feature = "minimal")] impl GroveDb { /// Check if it's an empty tree pub fn is_empty_tree<'b, B, P>( @@ -31,19 +28,17 @@ impl GroveDb { let mut cost = OperationCost::default(); let path: SubtreePath = path.into(); + let tx = TxRef::new(&self.db, transaction); + cost_return_on_error!( &mut cost, - self.check_subtree_exists_path_not_found(path.clone(), transaction, grove_version) + self.check_subtree_exists_path_not_found(path.clone(), tx.as_ref(), grove_version) ); - merk_optional_tx!( + let subtree = cost_return_on_error!( &mut cost, - self.db, - path, - None, - transaction, - subtree, - grove_version, - { Ok(subtree.is_empty_tree().unwrap_add_cost(&mut cost)).wrap_with_cost(cost) } - ) + compat::merk_optional_tx(&self.db, path, tx.as_ref(), None, grove_version) + ); + + subtree.is_empty_tree().add_cost(cost).map(Ok) } } diff --git a/grovedb/src/operations/proof/generate.rs b/grovedb/src/operations/proof/generate.rs index aff48637..7fe50c6e 100644 --- a/grovedb/src/operations/proof/generate.rs +++ b/grovedb/src/operations/proof/generate.rs @@ -12,9 +12,7 @@ use grovedb_merk::{ Merk, ProofWithoutEncodingResult, }; use grovedb_storage::StorageContext; -use grovedb_version::{ - check_grovedb_v0_with_cost, error::GroveVersionError, version::GroveVersion, -}; +use grovedb_version::{check_grovedb_v0_with_cost, version::GroveVersion}; #[cfg(feature = 
"proof_debug")] use crate::query_result_type::QueryResultType; @@ -89,7 +87,7 @@ impl GroveDb { .with_big_endian() .with_no_limit(); let encoded_proof = cost_return_on_error_no_add!( - &cost, + cost, bincode::encode_to_vec(proof, config) .map_err(|e| Error::CorruptedData(format!("unable to encode proof {}", e))) ); @@ -192,8 +190,10 @@ impl GroveDb { ) -> CostResult { let mut cost = OperationCost::default(); + let tx = self.start_transaction(); + let query = cost_return_on_error_no_add!( - &cost, + cost, path_query .query_items_at_path(path.as_slice(), grove_version) .and_then(|query_items| { @@ -210,7 +210,7 @@ impl GroveDb { let subtree = cost_return_on_error!( &mut cost, - self.open_non_transactional_merk_at_path(path.as_slice().into(), None, grove_version) + self.open_transactional_merk_at_path(path.as_slice().into(), &tx, None, grove_version) ); let limit = if path.len() < path_query.path.len() { diff --git a/grovedb/src/operations/proof/verify.rs b/grovedb/src/operations/proof/verify.rs index dca00bb9..e67b7eb1 100644 --- a/grovedb/src/operations/proof/verify.rs +++ b/grovedb/src/operations/proof/verify.rs @@ -9,8 +9,7 @@ use grovedb_merk::{ CryptoHash, }; use grovedb_version::{ - check_grovedb_v0, error::GroveVersionError, version::GroveVersion, TryFromVersioned, - TryIntoVersioned, + check_grovedb_v0, version::GroveVersion, TryFromVersioned, TryIntoVersioned, }; #[cfg(feature = "proof_debug")] @@ -290,7 +289,7 @@ impl GroveDb { if merk_result.result_set.is_empty() { if prove_options.decrease_limit_on_empty_sub_query_result { - limit_left.as_mut().map(|limit| *limit -= 1); + limit_left.iter_mut().for_each(|limit| *limit -= 1); } } else { for proved_key_value in merk_result.result_set { @@ -371,7 +370,7 @@ impl GroveDb { } result.push(path_key_optional_value.try_into_versioned(grove_version)?); - limit_left.as_mut().map(|limit| *limit -= 1); + limit_left.iter_mut().for_each(|limit| *limit -= 1); if limit_left == &Some(0) { break; } diff --git 
a/grovedb/src/query/mod.rs b/grovedb/src/query/mod.rs index 890da1db..41c09245 100644 --- a/grovedb/src/query/mod.rs +++ b/grovedb/src/query/mod.rs @@ -12,7 +12,7 @@ use grovedb_merk::proofs::query::query_item::QueryItem; use grovedb_merk::proofs::query::{Key, SubqueryBranch}; #[cfg(any(feature = "minimal", feature = "verify"))] use grovedb_merk::proofs::Query; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; use indexmap::IndexMap; use crate::operations::proof::util::hex_to_ascii; @@ -464,7 +464,7 @@ pub enum HasSubquery<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> fmt::Display for HasSubquery<'a> { +impl fmt::Display for HasSubquery<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { HasSubquery::NoSubquery => write!(f, "NoSubquery"), @@ -480,7 +480,7 @@ impl<'a> fmt::Display for HasSubquery<'a> { } } -impl<'a> HasSubquery<'a> { +impl HasSubquery<'_> { /// Checks to see if we have a subquery on a specific key pub fn has_subquery_on_key(&self, key: &[u8]) -> bool { match self { @@ -509,7 +509,7 @@ pub struct SinglePathSubquery<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> fmt::Display for SinglePathSubquery<'a> { +impl fmt::Display for SinglePathSubquery<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "InternalCowItemsQuery {{")?; writeln!(f, " items: [")?; diff --git a/grovedb/src/reference_path.rs b/grovedb/src/reference_path.rs index afd85260..11d407a6 100644 --- a/grovedb/src/reference_path.rs +++ b/grovedb/src/reference_path.rs @@ -2,15 +2,27 @@ #[cfg(any(feature = "minimal", feature = "verify"))] use std::fmt; +use std::{collections::HashSet, iter}; use bincode::{Decode, Encode}; -#[cfg(feature = "minimal")] +use grovedb_costs::{cost_return_on_error, cost_return_on_error_no_add, CostResult, CostsExt}; +use grovedb_merk::CryptoHash; 
+#[cfg(any(feature = "minimal", feature = "verify"))] +use grovedb_path::{SubtreePath, SubtreePathBuilder}; +use grovedb_version::check_grovedb_v0_with_cost; +#[cfg(any(feature = "minimal", feature = "visualize"))] use grovedb_visualize::visualize_to_vec; #[cfg(feature = "minimal")] use integer_encoding::VarInt; #[cfg(any(feature = "minimal", feature = "verify"))] use crate::Error; +#[cfg(feature = "minimal")] +use crate::{ + merk_cache::{MerkCache, MerkHandle}, + operations::MAX_REFERENCE_HOPS, + Element, +}; #[cfg(any(feature = "minimal", feature = "verify"))] #[cfg_attr(not(any(feature = "minimal", feature = "visualize")), derive(Debug))] @@ -59,10 +71,89 @@ pub enum ReferencePathType { SiblingReference(Vec), } +impl ReferencePathType { + /// Get an inverted reference + pub(crate) fn invert>(&self, path: SubtreePath, key: &[u8]) -> Option { + Some(match self { + // Absolute path shall point to a fully qualified path of the reference's origin + ReferencePathType::AbsolutePathReference(_) => { + let mut qualified_path = path.to_vec(); + qualified_path.push(key.to_vec()); + ReferencePathType::AbsolutePathReference(qualified_path) + } + // Since both reference origin and path share N first segments, the backward reference + // can do the same, key we shall persist for a qualified path as the output + ReferencePathType::UpstreamRootHeightReference(n, _) => { + let relative_path: Vec<_> = path + .to_vec() + .into_iter() + .skip(*n as usize) + .chain(iter::once(key.to_vec())) + .collect(); + ReferencePathType::UpstreamRootHeightReference(*n, relative_path) + } + // Since it uses some parent information it get's complicated, so falling back to the + // preivous type of reference + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference(n, _) => { + let relative_path: Vec<_> = path + .to_vec() + .into_iter() + .skip(*n as usize) + .chain(iter::once(key.to_vec())) + .collect(); + ReferencePathType::UpstreamRootHeightReference(*n, relative_path) + } + // 
Discarding N latest segments is relative to the previously appended path, so it would + // be easier to discard appended paths both ways and have a shared prefix. + ReferencePathType::UpstreamFromElementHeightReference(n, append_path) => { + let mut relative_path: Vec> = path + .into_reverse_iter() + .take(*n as usize) + .map(|x| x.to_vec()) + .collect(); + relative_path.reverse(); + relative_path.push(key.to_vec()); + ReferencePathType::UpstreamFromElementHeightReference( + append_path.len() as u8 - 1, + relative_path, + ) + } + // Cousin is relative to cousin, key will remain the same + ReferencePathType::CousinReference(_) => ReferencePathType::CousinReference( + path.into_reverse_iter().next().map(|x| x.to_vec())?, + ), + // Here since any number of segments could've been added we need to resort to a more + // specific option + ReferencePathType::RemovedCousinReference(append_path) => { + let mut relative_path = + vec![path.into_reverse_iter().next().map(|x| x.to_vec())?]; + relative_path.push(key.to_vec()); + ReferencePathType::UpstreamFromElementHeightReference( + append_path.len() as u8, + relative_path, + ) + } + // The closest way back would be just to use the key + ReferencePathType::SiblingReference(_) => { + ReferencePathType::SiblingReference(key.to_vec()) + } + }) + } +} + // Helper function to display paths fn display_path(path: &[Vec]) -> String { path.iter() - .map(hex::encode) + .map(|bytes| { + let mut hx = hex::encode(bytes); + if let Ok(s) = String::from_utf8(bytes.clone()) { + hx.push('('); + hx.push_str(&s); + hx.push(')'); + } + + hx + }) .collect::>() .join("/") } @@ -132,6 +223,132 @@ impl ReferencePathType { ) -> Result>, Error> { path_from_reference_path_type(self, current_path, current_key) } + + /// TODO: deprecate the rest + pub fn absolute_qualified_path<'b, B: AsRef<[u8]>>( + self, + mut current_path: SubtreePathBuilder<'b, B>, + current_key: &[u8], + ) -> Result, Error> { + match self { + 
ReferencePathType::AbsolutePathReference(path) => { + Ok(SubtreePathBuilder::owned_from_iter(path)) + } + + ReferencePathType::UpstreamRootHeightReference(no_of_elements_to_keep, append_path) => { + let len = current_path.len(); + if no_of_elements_to_keep as usize > len { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + let n_to_remove = len - no_of_elements_to_keep as usize; + + let referenced_path = (0..n_to_remove).fold(current_path, |p, _| { + p.derive_parent_owned() + .expect("lenghts were checked above") + .0 + }); + let referenced_path = append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + + Ok(referenced_path) + } + + ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + no_of_elements_to_keep, + append_path, + ) => { + let len = current_path.len(); + if no_of_elements_to_keep as usize > len || len < 1 { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + + let parent_key = current_path + .reverse_iter() + .next() + .expect("lengths were checked above") + .to_vec(); + + let n_to_remove = len - no_of_elements_to_keep as usize; + + let referenced_path = (0..n_to_remove).fold(current_path, |p, _| { + p.derive_parent_owned() + .expect("lenghts were checked above") + .0 + }); + let mut referenced_path = + append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + referenced_path.push_segment(&parent_key); + + Ok(referenced_path) + } + + // Discard the last n elements from current path, append new path to subpath + ReferencePathType::UpstreamFromElementHeightReference( + no_of_elements_to_discard_from_end, + append_path, + ) => { + let mut referenced_path = current_path; + for _ in 0..no_of_elements_to_discard_from_end { + if let Some((path, _)) = referenced_path.derive_parent_owned() { + referenced_path = path; + } else { + return 
Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + } + } + + let referenced_path = append_path.into_iter().fold(referenced_path, |mut p, s| { + p.push_segment(&s); + p + }); + + Ok(referenced_path) + } + + ReferencePathType::CousinReference(cousin_key) => { + let Some((mut referred_path, _)) = current_path.derive_parent_owned() else { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + }; + + referred_path.push_segment(&cousin_key); + referred_path.push_segment(current_key); + + Ok(referred_path) + } + + ReferencePathType::RemovedCousinReference(cousin_path) => { + let Some((mut referred_path, _)) = current_path.derive_parent_owned() else { + return Err(Error::InvalidInput( + "reference stored path cannot satisfy reference constraints", + )); + }; + + cousin_path + .into_iter() + .for_each(|s| referred_path.push_segment(&s)); + referred_path.push_segment(current_key); + + Ok(referred_path) + } + + ReferencePathType::SiblingReference(sibling_key) => { + current_path.push_segment(&sibling_key); + Ok(current_path) + } + } + } } #[cfg(any(feature = "minimal", feature = "visualize"))] @@ -320,10 +537,161 @@ impl ReferencePathType { } } +#[cfg(feature = "minimal")] +pub(crate) struct ResolvedReference<'db, 'b, 'c, B> { + pub target_merk: MerkHandle<'db, 'c>, + pub target_path: SubtreePathBuilder<'b, B>, + pub target_key: Vec, + pub target_element: Element, + pub target_node_value_hash: CryptoHash, +} + +#[cfg(feature = "minimal")] +pub(crate) fn follow_reference<'db, 'b, 'c, B: AsRef<[u8]>>( + merk_cache: &'c MerkCache<'db, 'b, B>, + path: SubtreePathBuilder<'b, B>, + key: &[u8], + ref_path: ReferencePathType, +) -> CostResult, Error> { + // TODO: this is a new version of follow reference + + check_grovedb_v0_with_cost!( + "follow_reference", + merk_cache + .version + .grovedb_versions + .operations + .get + .follow_reference + ); + + let mut cost = Default::default(); + + 
let mut hops_left = MAX_REFERENCE_HOPS; + let mut visited = HashSet::new(); + + let mut qualified_path = path.clone(); + qualified_path.push_segment(key); + + visited.insert(qualified_path); + + let mut current_path = path; + let mut current_key = key.to_vec(); + let mut current_ref = ref_path; + + while hops_left > 0 { + let referred_qualified_path = cost_return_on_error_no_add!( + cost, + current_ref.absolute_qualified_path(current_path, ¤t_key) + ); + + if !visited.insert(referred_qualified_path.clone()) { + return Err(Error::CyclicReference).wrap_with_cost(cost); + } + + let Some((referred_path, referred_key)) = referred_qualified_path.derive_parent_owned() + else { + return Err(Error::InvalidCodeExecution("empty reference")).wrap_with_cost(cost); + }; + + let mut referred_merk = + cost_return_on_error!(&mut cost, merk_cache.get_merk(referred_path.clone())); + let (element, value_hash) = cost_return_on_error!( + &mut cost, + referred_merk + .for_merk(|m| { + Element::get_with_value_hash(m, &referred_key, true, merk_cache.version) + }) + .map_err(|e| match e { + Error::PathKeyNotFound(s) => Error::CorruptedReferencePathKeyNotFound(s), + e => e, + }) + ); + + match element { + Element::Reference(ref_path, ..) => { + current_path = referred_path; + current_key = referred_key; + current_ref = ref_path; + hops_left -= 1; + } + e => { + return Ok(ResolvedReference { + target_merk: referred_merk, + target_path: referred_path, + target_key: referred_key, + target_element: e, + target_node_value_hash: value_hash, + }) + .wrap_with_cost(cost) + } + } + } + + Err(Error::ReferenceLimit).wrap_with_cost(cost) +} + +#[cfg(feature = "minimal")] +/// Follow references stopping at the immediate element without following +/// further. 
+pub(crate) fn follow_reference_once<'db, 'b, 'c, B: AsRef<[u8]>>( + merk_cache: &'c MerkCache<'db, 'b, B>, + path: SubtreePathBuilder<'b, B>, + key: &[u8], + ref_path: ReferencePathType, +) -> CostResult, Error> { + check_grovedb_v0_with_cost!( + "follow_reference_once", + merk_cache + .version + .grovedb_versions + .operations + .get + .follow_reference_once + ); + + let mut cost = Default::default(); + + let referred_qualified_path = + cost_return_on_error_no_add!(cost, ref_path.absolute_qualified_path(path.clone(), key)); + + let Some((referred_path, referred_key)) = referred_qualified_path.derive_parent_owned() else { + return Err(Error::InvalidCodeExecution("empty reference")).wrap_with_cost(cost); + }; + + if path == referred_path && key == referred_key { + return Err(Error::CyclicReference).wrap_with_cost(cost); + } + + let mut referred_merk = + cost_return_on_error!(&mut cost, merk_cache.get_merk(referred_path.clone())); + let (element, value_hash) = cost_return_on_error!( + &mut cost, + referred_merk + .for_merk(|m| { + Element::get_with_value_hash(m, &referred_key, true, merk_cache.version) + }) + .map_err(|e| match e { + Error::PathKeyNotFound(s) => Error::CorruptedReferencePathKeyNotFound(s), + e => e, + }) + ); + + Ok(ResolvedReference { + target_merk: referred_merk, + target_path: referred_path, + target_key: referred_key, + target_element: element, + target_node_value_hash: value_hash, + }) + .wrap_with_cost(cost) +} + #[cfg(feature = "minimal")] #[cfg(test)] mod tests { use grovedb_merk::proofs::Query; + use grovedb_path::{SubtreePath, SubtreePathBuilder}; use grovedb_version::version::GroveVersion; use crate::{ @@ -345,6 +713,20 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // selects the first 2 elements from the stored path and appends the new path. 
+ let ref1 = + ReferencePathType::UpstreamRootHeightReference(2, vec![b"c".to_vec(), b"d".to_vec()]); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec(), b"d".to_vec()] + ); + } + #[test] fn test_upstream_root_height_with_parent_addition_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -366,6 +748,28 @@ mod tests { ); } + #[test] + fn test_upstream_root_height_with_parent_addition_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // selects the first 2 elements from the stored path and appends the new path. + let ref1 = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![ + b"a".to_vec(), + b"b".to_vec(), + b"c".to_vec(), + b"d".to_vec(), + b"m".to_vec() + ] + ); + } + #[test] fn test_upstream_from_element_height_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -381,6 +785,22 @@ mod tests { ); } + #[test] + fn test_upstream_from_element_height_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]); + // discards the last element from the stored_path + let ref1 = ReferencePathType::UpstreamFromElementHeightReference( + 1, + vec![b"c".to_vec(), b"d".to_vec()], + ); + let final_path = ref1.absolute_qualified_path(stored_path, b"").unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec(), b"d".to_vec()] + ); + } + #[test] fn test_cousin_reference_no_key() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -403,6 +823,20 @@ mod tests { 
); } + #[test] + fn test_cousin_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + // Replaces the immediate parent (in this case b) with the given key (c) + let ref1 = ReferencePathType::CousinReference(b"c".to_vec()); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"c".to_vec(), b"m".to_vec()] + ); + } + #[test] fn test_removed_cousin_reference_no_key() { let stored_path = vec![b"a".as_ref(), b"b".as_ref(), b"m".as_ref()]; @@ -425,6 +859,20 @@ mod tests { ); } + #[test] + fn test_removed_cousin_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + // Replaces the immediate parent (in this case b) with the given key (c) + let ref1 = ReferencePathType::RemovedCousinReference(vec![b"c".to_vec(), b"d".to_vec()]); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"c".to_vec(), b"d".to_vec(), b"m".to_vec()] + ); + } + #[test] fn test_sibling_reference() { let stored_path = vec![b"a".as_ref(), b"b".as_ref()]; @@ -437,6 +885,19 @@ mod tests { ); } + #[test] + fn test_sibling_reference_path_lib() { + let stored_path: SubtreePathBuilder<&[u8]> = + SubtreePathBuilder::owned_from_iter([b"a".as_ref(), b"b".as_ref()]); + let key = b"m".as_ref(); + let ref1 = ReferencePathType::SiblingReference(b"c".to_vec()); + let final_path = ref1.absolute_qualified_path(stored_path, key).unwrap(); + assert_eq!( + final_path.to_vec(), + vec![b"a".to_vec(), b"b".to_vec(), b"c".to_vec()] + ); + } + #[test] fn test_query_many_with_different_reference_types() { let grove_version = GroveVersion::latest(); @@ -515,4 +976,251 @@ mod tests { assert_eq!(hash, db.root_hash(None, 
grove_version).unwrap().unwrap()); assert_eq!(result.len(), 5); } + + #[test] + fn inverted_absolute_path() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + + let reference = + ReferencePathType::AbsolutePathReference(vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } + + #[test] + fn inverted_upstream_root_height() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + + let reference = + ReferencePathType::UpstreamRootHeightReference(2, vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), None) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + #[test] + fn inverted_upstream_root_height_with_parent_path_addition() { + 
let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::UpstreamRootHeightWithParentPathAdditionReference( + 2, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + #[test] + fn inverted_upstream_from_element_height() { + { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::UpstreamFromElementHeightReference( + 1, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + + { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; 
+ let reference = ReferencePathType::UpstreamFromElementHeightReference( + 3, + vec![b"m".to_vec(), b"n".to_vec()], + ); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path.to_vec(), + ); + } + } + + #[test] + fn inverted_cousin_reference() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = + ReferencePathType::RemovedCousinReference(vec![b"m".to_vec(), b"n".to_vec()]); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } + + #[test] + fn inverted_sibling_reference() { + let current_path: SubtreePath<_> = (&[b"a", b"b", b"c", b"d"]).into(); + let current_key = b"e"; + let current_qualified_path = { + let mut p = current_path.to_vec(); + p.push(current_key.to_vec()); + p + }; + let reference = ReferencePathType::SiblingReference(b"yeet".to_vec()); + + let pointed_to_qualified_path = reference + .clone() + .absolute_path(¤t_path.to_vec(), Some(current_key)) + .unwrap(); + let (pointed_to_key, 
pointed_to_path) = pointed_to_qualified_path.split_last().unwrap(); + + let inverse = reference.invert(current_path.clone(), current_key).unwrap(); + + assert_ne!(reference, inverse); + assert_eq!( + reference, + inverse + .invert(pointed_to_path.into(), pointed_to_key) + .unwrap() + ); + assert_eq!( + inverse + .absolute_path(&pointed_to_path, Some(pointed_to_key)) + .unwrap(), + current_qualified_path + ); + } } diff --git a/grovedb/src/replication.rs b/grovedb/src/replication.rs index 000f97b2..3cb23790 100644 --- a/grovedb/src/replication.rs +++ b/grovedb/src/replication.rs @@ -4,10 +4,10 @@ use std::pin::Pin; use grovedb_merk::{tree::hash::CryptoHash, tree_type::TreeType, ChunkProducer}; use grovedb_path::SubtreePath; -use grovedb_version::{check_grovedb_v0, error::GroveVersionError, version::GroveVersion}; +use grovedb_version::{check_grovedb_v0, version::GroveVersion}; pub use self::state_sync_session::MultiStateSyncSession; -use crate::{Error, GroveDb, TransactionArg}; +use crate::{util::TxRef, Error, GroveDb, TransactionArg}; /// Type alias representing a chunk identifier in the state synchronization /// process. 
@@ -74,6 +74,9 @@ impl GroveDb { "fetch_chunk", grove_version.grovedb_versions.replication.fetch_chunk ); + + let tx = TxRef::new(&self.db, transaction); + // For now, only CURRENT_STATE_SYNC_VERSION is supported if version != CURRENT_STATE_SYNC_VERSION { return Err(Error::CorruptedData( @@ -81,104 +84,57 @@ impl GroveDb { )); } - let root_app_hash = self.root_hash(transaction, grove_version).value?; + let root_app_hash = self.root_hash(Some(tx.as_ref()), grove_version).value?; let (chunk_prefix, root_key, tree_type, chunk_id) = utils::decode_global_chunk_id(global_chunk_id, &root_app_hash)?; // TODO: Refactor this by writing fetch_chunk_inner (as only merk constructor // and type are different) - if let Some(tx) = transaction { - let merk = self - .open_transactional_merk_by_prefix( - chunk_prefix, - root_key, - tree_type, - tx, - None, - grove_version, - ) - .value - .map_err(|e| { - Error::CorruptedData(format!( - "failed to open merk by prefix tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } - - let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { - Error::CorruptedData(format!( - "failed to create chunk producer by prefix tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let (chunk, _) = chunk_producer - .chunk(&chunk_id, grove_version) - .map_err(|e| { - Error::CorruptedData(format!( - "failed to apply chunk:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + let merk = self + .open_transactional_merk_by_prefix( + chunk_prefix, + root_key, + tree_type, + tx.as_ref(), + None, + grove_version, + ) + .value + .map_err(|e| { Error::CorruptedData(format!( - "failed to encode chunk ops:{} with:{}", + "failed to open merk by prefix tx:{} with:{}", hex::encode(chunk_prefix), e )) })?; - Ok(op_bytes) - } else { - let merk = self - .open_non_transactional_merk_by_prefix( - chunk_prefix, - root_key, - 
tree_type, - None, - grove_version, - ) - .value - .map_err(|e| { - Error::CorruptedData(format!( - "failed to open merk by prefix non-tx:{} with:{}", - e, - hex::encode(chunk_prefix) - )) - })?; - if merk.is_empty_tree().unwrap() { - return Ok(vec![]); - } + if merk.is_empty_tree().unwrap() { + return Ok(vec![]); + } - let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { - Error::CorruptedData(format!( - "failed to create chunk producer by prefix non-tx:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let (chunk, _) = chunk_producer - .chunk(&chunk_id, grove_version) - .map_err(|e| { - Error::CorruptedData(format!( - "failed to apply chunk:{} with:{}", - hex::encode(chunk_prefix), - e - )) - })?; - let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + let mut chunk_producer = ChunkProducer::new(&merk).map_err(|e| { + Error::CorruptedData(format!( + "failed to create chunk producer by prefix tx:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + let (chunk, _) = chunk_producer + .chunk(&chunk_id, grove_version) + .map_err(|e| { Error::CorruptedData(format!( - "failed to encode chunk ops:{} with:{}", + "failed to apply chunk:{} with:{}", hex::encode(chunk_prefix), e )) })?; - Ok(op_bytes) - } + let op_bytes = utils::encode_vec_ops(chunk).map_err(|e| { + Error::CorruptedData(format!( + "failed to encode chunk ops:{} with:{}", + hex::encode(chunk_prefix), + e + )) + })?; + Ok(op_bytes) } /// Starts a state synchronization process for a snapshot with the given diff --git a/grovedb/src/tests/count_sum_tree_tests.rs b/grovedb/src/tests/count_sum_tree_tests.rs index f171aee0..7c5e4b2d 100644 --- a/grovedb/src/tests/count_sum_tree_tests.rs +++ b/grovedb/src/tests/count_sum_tree_tests.rs @@ -109,9 +109,12 @@ mod count_sum_tree_tests { // Test aggregate data (count and sum) let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + 
.open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -168,9 +171,12 @@ mod count_sum_tree_tests { // Test aggregate data let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -249,9 +255,12 @@ mod count_sum_tree_tests { // Open merk and check all elements in it let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key3"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -344,11 +353,13 @@ mod count_sum_tree_tests { .expect("should insert regular tree"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Aggregate data should be None for regular tree let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"regular_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -407,8 +418,9 @@ mod count_sum_tree_tests { // Verify aggregate data let batch = StorageBatch::new(); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key4"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -452,9 +464,12 @@ mod count_sum_tree_tests { // Open the CountSumTree and verify aggregate data let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_sum_key6"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -521,11 +536,14 @@ mod count_sum_tree_tests { // Verify aggregate data of child let batch = StorageBatch::new(); + let 
transaction = db.start_transaction(); + let child_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"parent_count_sum", b"child_count_sum"] .as_ref() .into(), + &transaction, Some(&batch), grove_version, ) @@ -540,8 +558,9 @@ mod count_sum_tree_tests { // Verify aggregate data of parent let parent_batch = StorageBatch::new(); let parent_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"parent_count_sum"].as_ref().into(), + &transaction, Some(&parent_batch), grove_version, ) diff --git a/grovedb/src/tests/count_tree_tests.rs b/grovedb/src/tests/count_tree_tests.rs index e4dffc06..690d9fef 100644 --- a/grovedb/src/tests/count_tree_tests.rs +++ b/grovedb/src/tests/count_tree_tests.rs @@ -165,11 +165,13 @@ mod tests { .expect("should insert item"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -223,6 +225,8 @@ mod tests { // Perform the same test on regular trees let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + db.insert( [TEST_LEAF].as_ref(), b"key", @@ -255,8 +259,9 @@ mod tests { .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -306,12 +311,14 @@ mod tests { .expect("should insert tree"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non count tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) 
@@ -350,10 +357,12 @@ mod tests { ) .unwrap() .expect("should insert item"); + // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -386,8 +395,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -409,8 +419,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -442,8 +453,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -464,8 +476,9 @@ mod tests { .unwrap() .expect("expected to delete"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -572,11 +585,13 @@ mod tests { assert_eq!(count_tree.count_value_or_default(), 5); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -596,8 +611,9 @@ mod tests { assert_matches!(root_tree_feature_type, BasicMerkNode); let parent_count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -616,8 +632,9 @@ mod tests { 
assert_matches!(count_tree_feature_type, CountedMerkNode(4)); let child_count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"count_key", b"tree2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -702,9 +719,12 @@ mod tests { .expect("should apply batch"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); + let count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -748,8 +768,9 @@ mod tests { let batch = StorageBatch::new(); let count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -837,8 +858,9 @@ mod tests { let batch = StorageBatch::new(); let count_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/tests/mod.rs b/grovedb/src/tests/mod.rs index a7f01eb7..1b961a7e 100644 --- a/grovedb/src/tests/mod.rs +++ b/grovedb/src/tests/mod.rs @@ -1409,12 +1409,17 @@ mod tests { fn test_root_tree_leaves_are_noted() { let grove_version = GroveVersion::latest(); let db = make_test_grovedb(grove_version); - db.check_subtree_exists_path_not_found([TEST_LEAF].as_ref().into(), None, grove_version) - .unwrap() - .expect("should exist"); + let transaction = db.start_transaction(); + db.check_subtree_exists_path_not_found( + [TEST_LEAF].as_ref().into(), + &transaction, + grove_version, + ) + .unwrap() + .expect("should exist"); db.check_subtree_exists_path_not_found( [ANOTHER_TEST_LEAF].as_ref().into(), - None, + &transaction, grove_version, ) .unwrap() @@ -3120,10 +3125,15 @@ mod tests { // let mut iter = db // .elements_iterator([TEST_LEAF, b"subtree1"].as_ref(), None) // .expect("cannot create 
iterator"); + let transaction = db.grove_db.start_transaction(); let storage_context = db .grove_db .db - .get_storage_context([TEST_LEAF, b"subtree1"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"subtree1"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let mut iter = Element::iterator(storage_context.raw_iter()).unwrap(); assert_eq!( @@ -3212,7 +3222,12 @@ mod tests { fn test_root_subtree_has_root_key() { let grove_version = GroveVersion::latest(); let db = make_test_grovedb(grove_version); - let storage = db.db.get_storage_context(EMPTY_PATH, None).unwrap(); + let transaction = db.start_transaction(); + + let storage = db + .db + .get_transactional_storage_context(EMPTY_PATH, None, &transaction) + .unwrap(); let root_merk = Merk::open_base( storage, TreeType::NormalTree, @@ -3312,10 +3327,16 @@ mod tests { // Retrieve subtree instance // Check if it returns the same instance that was inserted { + let transaction = db.grove_db.start_transaction(); + let subtree_storage = db .grove_db .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let subtree = Merk::open_layered_with_root_key( subtree_storage, @@ -3330,6 +3351,11 @@ mod tests { .unwrap() .unwrap(); assert_eq!(result_element, Element::new_item(b"ayy".to_vec())); + + db.grove_db + .commit_transaction(transaction) + .unwrap() + .unwrap(); } // Insert a new tree with transaction let transaction = db.start_transaction(); @@ -3384,7 +3410,11 @@ mod tests { let subtree_storage = db .grove_db .db - .get_storage_context([TEST_LEAF, b"key1", b"key2"].as_ref().into(), None) + .get_transactional_storage_context( + [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + None, + &transaction, + ) .unwrap(); let subtree = Merk::open_layered_with_root_key( subtree_storage, diff --git a/grovedb/src/tests/sum_tree_tests.rs 
b/grovedb/src/tests/sum_tree_tests.rs index 777fcb45..4aa96ee8 100644 --- a/grovedb/src/tests/sum_tree_tests.rs +++ b/grovedb/src/tests/sum_tree_tests.rs @@ -268,11 +268,13 @@ mod tests { .expect("should insert item"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Open merk and check all elements in it let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -331,6 +333,8 @@ mod tests { // Perform the same test on regular trees let db = make_test_grovedb(grove_version); + let transaction = db.start_transaction(); + db.insert( [TEST_LEAF].as_ref(), b"key", @@ -363,8 +367,9 @@ mod tests { .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -415,11 +420,13 @@ mod tests { let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -460,8 +467,9 @@ mod tests { .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -494,8 +502,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -518,8 +527,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( 
+ .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -552,8 +562,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -586,8 +597,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -619,11 +631,13 @@ mod tests { let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Sum should be non for non sum tree // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -664,8 +678,9 @@ mod tests { .expect("should insert item"); // TODO: change interface to retrieve element directly let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -688,8 +703,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -712,8 +728,9 @@ mod tests { .unwrap() .expect_err("should not be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -736,8 +753,9 @@ mod tests { .unwrap() .expect("should insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, 
b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -760,8 +778,9 @@ mod tests { .unwrap() .expect_err("should not be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -784,8 +803,9 @@ mod tests { .unwrap() .expect("should be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -808,8 +828,9 @@ mod tests { .expect("should be able to insert item"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -831,8 +852,9 @@ mod tests { .unwrap() .expect_err("expected not be able to delete"); let merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -938,11 +960,13 @@ mod tests { assert_eq!(sum_tree.sum_value_or_default(), 35); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -962,8 +986,9 @@ mod tests { )); let parent_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -985,8 +1010,9 @@ mod tests { )); let child_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key", b"tree2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1175,11 +1201,13 @@ mod tests { ); let batch = 
StorageBatch::new(); + let transaction = db.start_transaction(); // Assert node feature types let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1199,8 +1227,9 @@ mod tests { )); let parent_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1237,8 +1266,9 @@ mod tests { ); let child_sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree", b"sum_tree_1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1295,8 +1325,9 @@ mod tests { ); let child_sum_tree_2 = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"big_sum_tree", b"sum_tree_2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1341,11 +1372,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1386,11 +1419,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1476,11 +1511,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - 
.open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1516,11 +1553,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1561,11 +1600,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -1651,11 +1692,13 @@ mod tests { db.apply_batch(ops, None, None, grove_version) .unwrap() .expect("should apply batch"); + let transaction = db.start_transaction(); let batch = StorageBatch::new(); let sum_tree = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/tests/tree_hashes_tests.rs b/grovedb/src/tests/tree_hashes_tests.rs index e86b8fd0..670b0918 100644 --- a/grovedb/src/tests/tree_hashes_tests.rs +++ b/grovedb/src/tests/tree_hashes_tests.rs @@ -56,10 +56,12 @@ fn test_node_hashes_when_inserting_item() { .expect("successful subtree insert"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -129,10 +131,12 @@ fn test_tree_hashes_when_inserting_empty_tree() { .expect("successful subtree insert"); let batch = 
StorageBatch::new(); + let transaction = db.start_transaction(); let test_leaf_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -173,8 +177,9 @@ fn test_tree_hashes_when_inserting_empty_tree() { .expect("value hash should be some"); let underlying_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -225,10 +230,12 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("successful subtree insert"); let batch = StorageBatch::new(); + let transaction = db.start_transaction(); let under_top_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -236,8 +243,9 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("should open merk"); let middle_merk_key1 = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) @@ -258,8 +266,9 @@ fn test_tree_hashes_when_inserting_empty_trees_twice_under_each_other() { .expect("value hash should be some"); let bottom_merk = db - .open_non_transactional_merk_at_path( + .open_transactional_merk_at_path( [TEST_LEAF, b"key1", b"key2"].as_ref().into(), + &transaction, Some(&batch), grove_version, ) diff --git a/grovedb/src/util.rs b/grovedb/src/util.rs index 20ec46d8..c91205a2 100644 --- a/grovedb/src/util.rs +++ b/grovedb/src/util.rs @@ -1,477 +1,39 @@ -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! 
storage_context_optional_tx { - ($db:expr, $path:expr, $batch:expr, $transaction:ident, $storage:ident, { $($body:tt)* }) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path, $batch, tx); - $($body)* - } else { - let $storage = $db - .get_storage_context($path, $batch); - $($body)* - } - } - }; -} +pub(crate) mod compat; -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! storage_context_with_parent_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $storage:ident, - $root_key:ident, - $tree_type:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path.clone(), $batch, tx) - .unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db - .get_transactional_storage_context(parent_path, $batch, tx) - .unwrap_add_cost(&mut $cost); - let element = cost_return_on_error!( - &mut $cost, - Element::get_from_storage(&parent_storage, parent_key, $grove_version) - .map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) - ) - }) - ); - let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } else { - let $storage = $db - .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db.get_storage_context( - parent_path, $batch - 
).unwrap_add_cost(&mut $cost); - let element = cost_return_on_error!( - &mut $cost, - Element::get_from_storage(&parent_storage, parent_key, $grove_version).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional no tx: {}", - e - ) - ) - }) - ); - let Some(($root_key, $tree_type)) = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } - } - }; -} +use grovedb_storage::Storage; -/// Macro to execute same piece of code on different storage contexts -/// (transactional or not) using path argument. -macro_rules! storage_context_with_parent_optional_tx_internal_error { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $storage:ident, - $root_key:ident, - $tree_type:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context($path.clone(), $batch, tx) - .unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db - .get_transactional_storage_context(parent_path, $batch, tx) - .unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage( - &parent_storage, - parent_key, - $grove_version - ).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional on tx: {}", - e - ) - ) - }).unwrap_add_cost(&mut $cost); - match result { - Ok(element) => { - let Some(($root_key, $tree_type)) - = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - }, - Err(e) => Err(e), - } - } else { - return 
Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } else { - let $storage = $db - .get_storage_context($path.clone(), $batch).unwrap_add_cost(&mut $cost); - if let Some((parent_path, parent_key)) = $path.derive_parent() { - let parent_storage = $db.get_storage_context( - parent_path, - $batch - ).unwrap_add_cost(&mut $cost); - let result = Element::get_from_storage( - &parent_storage, - parent_key, - $grove_version - ).map_err(|e| { - Error::PathParentLayerNotFound( - format!( - "could not get key for parent of subtree optional no tx: {}", - e - ) - ) - }).unwrap_add_cost(&mut $cost); - match result { - Ok(element) => { - let Some(($root_key, $tree_type)) - = element.root_key_and_tree_type_owned() else - { - return Err(Error::CorruptedData( - "parent is not a tree" - .to_owned(), - )).wrap_with_cost($cost); - }; - $($body)* - }, - Err(e) => Err(e), - } - } else { - return Err(Error::CorruptedData( - "path is empty".to_owned(), - )).wrap_with_cost($cost); - } - } - } - }; -} +use crate::{Error, RocksDbStorage, Transaction, TransactionArg}; -/// Macro to execute same piece of code on different storage contexts with -/// empty prefix. -macro_rules! meta_storage_context_optional_tx { - ($db:expr, $batch:expr, $transaction:ident, $storage:ident, { $($body:tt)* }) => { - { - use ::grovedb_storage::Storage; - if let Some(tx) = $transaction { - let $storage = $db - .get_transactional_storage_context( - ::grovedb_path::SubtreePath::empty(), - $batch, - tx - ); - $($body)* - } else { - let $storage = $db - .get_storage_context( - ::grovedb_path::SubtreePath::empty(), - $batch - ); - $($body)* - } - } - }; +pub(crate) enum TxRef<'a, 'db: 'a> { + Owned(Transaction<'db>), + Borrowed(&'a Transaction<'db>), } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
merk_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - if $path.is_root() { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) - } else { - use crate::util::storage_context_with_parent_optional_tx; - storage_context_with_parent_optional_tx!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) - } - }; -} - -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
merk_optional_tx_internal_error { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - if $path.is_root() { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) - } else { - use crate::util::storage_context_with_parent_optional_tx_internal_error; - storage_context_with_parent_optional_tx_internal_error!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) - } - }; -} +impl<'a, 'db> TxRef<'a, 'db> { + pub(crate) fn new(db: &'db RocksDbStorage, transaction_arg: TransactionArg<'db, 'a>) -> Self { + if let Some(tx) = transaction_arg { + Self::Borrowed(tx) + } else { + Self::Owned(db.start_transaction()) + } + } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
merk_optional_tx_path_not_empty { - ( - &mut $cost:ident, - $db:expr, - $path:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use crate::util::storage_context_with_parent_optional_tx; - storage_context_with_parent_optional_tx!( - &mut $cost, - $db, - $path, - $batch, - $transaction, - storage, - root_key, - tree_type, - $grove_version, - { - #[allow(unused_mut)] - let mut $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_layered_with_root_key( - storage, - root_key, - tree_type, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - } - ) + /// Commit the transaction if it wasn't received from outside + pub(crate) fn commit_local(self) -> Result<(), Error> { + match self { + TxRef::Owned(tx) => tx + .commit() + .map_err(|e| grovedb_storage::Error::from(e).into()), + TxRef::Borrowed(_) => Ok(()), + } } - }; } -/// Macro to execute same piece of code on Merk with varying storage -/// contexts. -macro_rules! 
root_merk_optional_tx { - ( - &mut $cost:ident, - $db:expr, - $batch:expr, - $transaction:ident, - $subtree:ident, - $grove_version:ident, - { $($body:tt)* } - ) => { - { - use crate::util::storage_context_optional_tx; - storage_context_optional_tx!( - $db, - ::grovedb_path::SubtreePath::empty(), - $batch, - $transaction, - storage, - { - let $subtree = cost_return_on_error!( - &mut $cost, - ::grovedb_merk::Merk::open_base( - storage.unwrap_add_cost(&mut $cost), - TreeType::NormalTree, - Some(&Element::value_defined_cost_for_serialized_value), - $grove_version, - ).map(|merk_res| - merk_res - .map_err(|_| crate::Error::CorruptedData( - "cannot open a subtree".to_owned() - )) - ) - ); - $($body)* - }) +impl<'db> AsRef> for TxRef<'_, 'db> { + fn as_ref(&self) -> &Transaction<'db> { + match self { + TxRef::Owned(tx) => tx, + TxRef::Borrowed(tx) => tx, } - }; + } } - -pub(crate) use merk_optional_tx; -pub(crate) use merk_optional_tx_internal_error; -pub(crate) use merk_optional_tx_path_not_empty; -pub(crate) use meta_storage_context_optional_tx; -pub(crate) use root_merk_optional_tx; -pub(crate) use storage_context_optional_tx; -pub(crate) use storage_context_with_parent_optional_tx; -pub(crate) use storage_context_with_parent_optional_tx_internal_error; diff --git a/grovedb/src/util/compat.rs b/grovedb/src/util/compat.rs new file mode 100644 index 00000000..894861c2 --- /dev/null +++ b/grovedb/src/util/compat.rs @@ -0,0 +1,131 @@ +use grovedb_costs::{cost_return_on_error, CostResult, CostsExt}; +use grovedb_merk::{Merk, TreeType}; +use grovedb_path::SubtreePath; +use grovedb_storage::{ + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, + Storage, StorageBatch, +}; +use grovedb_version::version::GroveVersion; + +use crate::{Element, Error, Transaction}; + +pub(crate) trait OpenMerkErrorsCompat { + fn parent_key_not_found>( + e: Error, + parent_path: SubtreePath, + parent_key: &[u8], + ) -> Error; + + fn open_base_error() -> Error; + + fn 
parent_must_be_tree() -> Error; +} + +pub(crate) fn open_merk<'db, 'b, B, C: OpenMerkErrorsCompat>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + let mut cost = Default::default(); + + let storage = db + .get_transactional_storage_context(path.clone(), batch, tx) + .unwrap_add_cost(&mut cost); + if let Some((parent_path, parent_key)) = path.derive_parent() { + let parent_storage = db + .get_transactional_storage_context(parent_path.clone(), batch, tx) + .unwrap_add_cost(&mut cost); + let element = cost_return_on_error!( + &mut cost, + Element::get_from_storage(&parent_storage, parent_key, grove_version) + .map_err(|e| C::parent_key_not_found(e, parent_path, parent_key)) + ); + if let Some((root_key, tree_type)) = element.root_key_and_tree_type_owned() { + Merk::open_layered_with_root_key( + storage, + root_key, + tree_type, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| C::parent_must_be_tree()) + .add_cost(cost) + } else { + Err(Error::CorruptedPath( + "cannot open a subtree as parent exists but is not a tree".to_string(), + )) + .wrap_with_cost(cost) + } + } else { + Merk::open_base( + storage, + TreeType::NormalTree, + Some(&Element::value_defined_cost_for_serialized_value), + grove_version, + ) + .map_err(|_| C::open_base_error()) + .add_cost(cost) + } +} + +/// Opens a subtree with errors returned compatible to now removed +/// `merk_optional_tx!` macro. 
+pub(crate) fn merk_optional_tx<'db, 'b, B>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + struct Compat; + + impl OpenMerkErrorsCompat for Compat { + fn parent_key_not_found>( + e: Error, + _parent_path: SubtreePath, + _parent_key: &[u8], + ) -> Error { + Error::PathParentLayerNotFound(format!( + "could not get key for parent of subtree optional on tx: {}", + e + )) + } + + fn open_base_error() -> Error { + Error::CorruptedData("cannot open a subtree".to_owned()) + } + + fn parent_must_be_tree() -> Error { + Error::CorruptedData("parent is not a tree".to_owned()) + } + } + + open_merk::<_, Compat>(db, path, tx, batch, grove_version) +} + +/// Opens a subtree with errors returned compatible to now removed +/// `merk_optional_tx_path_not_empty!` macro. +pub(crate) fn merk_optional_tx_path_not_empty<'db, 'b, B>( + db: &'db RocksDbStorage, + path: SubtreePath<'b, B>, + tx: &'db Transaction, + batch: Option<&'db StorageBatch>, + grove_version: &GroveVersion, +) -> CostResult>, Error> +where + B: AsRef<[u8]> + 'b, +{ + if path.is_root() { + Err(Error::CorruptedData("path is empty".to_owned())).wrap_with_cost(Default::default()) + } else { + merk_optional_tx(db, path, tx, batch, grove_version) + } +} diff --git a/grovedb/src/visualize.rs b/grovedb/src/visualize.rs index 8fdccc7e..fbf79ee5 100644 --- a/grovedb/src/visualize.rs +++ b/grovedb/src/visualize.rs @@ -36,13 +36,12 @@ use bincode::{ }; use grovedb_merk::{Merk, VisualizeableMerk}; use grovedb_path::SubtreePathBuilder; -use grovedb_storage::StorageContext; +use grovedb_storage::{Storage, StorageContext}; use grovedb_version::version::GroveVersion; use grovedb_visualize::{visualize_stdout, Drawer, Visualize}; use crate::{ - element::Element, reference_path::ReferencePathType, util::storage_context_optional_tx, - GroveDb, TransactionArg, + element::Element, 
reference_path::ReferencePathType, util::TxRef, GroveDb, TransactionArg, }; impl Visualize for Element { @@ -225,35 +224,40 @@ impl GroveDb { ) -> Result> { drawer.down(); - storage_context_optional_tx!(self.db, (&path).into(), None, transaction, storage, { - let mut iter = Element::iterator(storage.unwrap().raw_iter()).unwrap(); - while let Some((key, element)) = iter - .next_element(grove_version) - .unwrap() - .expect("cannot get next element") - { - drawer.write(b"\n[key: ")?; - drawer = key.visualize(drawer)?; - drawer.write(b" ")?; - match element { - Element::Tree(..) => { - drawer.write(b"Merk root is: ")?; - drawer = element.visualize(drawer)?; - drawer.down(); - drawer = self.draw_subtree( - drawer, - path.derive_owned_with_child(key), - transaction, - grove_version, - )?; - drawer.up(); - } - other => { - drawer = other.visualize(drawer)?; - } + let tx = TxRef::new(&self.db, transaction); + + let storage = self + .db + .get_transactional_storage_context((&path).into(), None, tx.as_ref()) + .unwrap(); + + let mut iter = Element::iterator(storage.raw_iter()).unwrap(); + while let Some((key, element)) = iter + .next_element(grove_version) + .unwrap() + .expect("cannot get next element") + { + drawer.write(b"\n[key: ")?; + drawer = key.visualize(drawer)?; + drawer.write(b" ")?; + match element { + Element::Tree(..) 
=> { + drawer.write(b"Merk root is: ")?; + drawer = element.visualize(drawer)?; + drawer.down(); + drawer = self.draw_subtree( + drawer, + path.derive_owned_with_child(key), + transaction, + grove_version, + )?; + drawer.up(); + } + other => { + drawer = other.visualize(drawer)?; } } - }); + } drawer.up(); Ok(drawer) diff --git a/merk/src/estimated_costs/average_case_costs.rs b/merk/src/estimated_costs/average_case_costs.rs index 3b535767..865d66a6 100644 --- a/merk/src/estimated_costs/average_case_costs.rs +++ b/merk/src/estimated_costs/average_case_costs.rs @@ -523,7 +523,7 @@ fn add_average_case_merk_propagate_v1( // we can get about 1 rotation, if there are more than 2 levels nodes_updated += 1; } - cost.seek_count += nodes_updated as u32; + cost.seek_count += nodes_updated; cost.hash_node_calls += nodes_updated * 2; @@ -794,7 +794,7 @@ fn add_average_case_merk_propagate_v0( // we can get about 1 rotation, if there are more than 2 levels nodes_updated += 1; } - cost.seek_count += nodes_updated as u32; + cost.seek_count += nodes_updated; cost.hash_node_calls += nodes_updated * 2; diff --git a/merk/src/merk/meta.rs b/merk/src/merk/meta.rs new file mode 100644 index 00000000..a51b7acf --- /dev/null +++ b/merk/src/merk/meta.rs @@ -0,0 +1,111 @@ +//! Metadata access for Merk trees + +use std::collections::hash_map::Entry; + +use grovedb_costs::{CostResult, CostsExt}; +use grovedb_storage::StorageContext; + +use super::Merk; +use crate::Error; + +impl<'db, S: StorageContext<'db>> Merk { + /// Get metadata for the Merk under `key`. + pub fn get_meta(&mut self, key: Vec) -> CostResult, Error> { + match self.meta_cache.entry(key) { + Entry::Occupied(e) => Ok(e.into_mut().as_deref()).wrap_with_cost(Default::default()), + Entry::Vacant(e) => self + .storage + .get_meta(e.key()) + .map_ok(|b| e.insert(b).as_deref()) + .map_err(Error::StorageError), + } + } + + /// Set metadata under `key`. This doesn't affect the state (root hash). 
+ pub fn put_meta(&mut self, key: Vec, value: Vec) -> CostResult<(), Error> { + self.storage + .put_meta(&key, &value, None) + .map_ok(|_| { + self.meta_cache.insert(key, Some(value)); + }) + .map_err(Error::StorageError) + } + + /// Delete metadata under `key`. + pub fn delete_meta(&mut self, key: &[u8]) -> CostResult<(), Error> { + self.storage + .delete_meta(key, None) + .map_ok(|_| { + self.meta_cache.remove(key); + }) + .map_err(Error::StorageError) + } +} + +#[cfg(test)] +mod tests { + use grovedb_costs::OperationCost; + use grovedb_version::version::GroveVersion; + + use crate::test_utils::TempMerk; + + #[test] + fn meta_storage_data_retrieval() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + merk.put_meta(b"key".to_vec(), b"value".to_vec()) + .unwrap() + .unwrap(); + + let mut cost: OperationCost = Default::default(); + assert_eq!( + merk.get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost) + .unwrap(), + Some(b"value".as_slice()) + ); + assert!(cost.is_nothing()); + } + + #[test] + fn meta_storage_works_uncommited() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + let mut cost_1: OperationCost = Default::default(); + assert!(merk + .get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost_1) + .unwrap() + .is_none()); + assert!(!cost_1.is_nothing()); + + let mut cost_2: OperationCost = Default::default(); + assert!(merk + .get_meta(b"key".to_vec()) + .unwrap_add_cost(&mut cost_2) + .unwrap() + .is_none()); + assert!(cost_2.is_nothing()); + } + + #[test] + fn meta_storage_deletion() { + let version = GroveVersion::latest(); + let mut merk = TempMerk::new(&version); + + merk.put_meta(b"key".to_vec(), b"value".to_vec()) + .unwrap() + .unwrap(); + + assert_eq!( + merk.get_meta(b"key".to_vec()).unwrap().unwrap(), + Some(b"value".as_slice()) + ); + + merk.delete_meta(b"key").unwrap().unwrap(); + + assert!(merk.get_meta(b"key".to_vec()).unwrap().unwrap().is_none()); + } +} diff --git 
a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 27d679c1..dc495ed8 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -187,7 +187,7 @@ impl<'a, I: RawIterator> KVIterator<'a, I> { } // Cannot be an Iterator as it should return cost -impl<'a, I: RawIterator> KVIterator<'a, I> { +impl KVIterator<'_, I> { /// Next key-value pub fn next_kv(&mut self) -> CostContext, Vec)>> { let mut cost = OperationCost::default(); @@ -412,7 +412,7 @@ where // update pointer to root node cost_return_on_error_no_add!( - &inner_cost, + inner_cost, batch .put_root(ROOT_KEY_KEY, tree_key, costs) .map_err(CostsError) @@ -448,7 +448,7 @@ where for (key, maybe_sum_tree_cost, maybe_value, storage_cost) in to_batch { if let Some((value, left_size, right_size)) = maybe_value { cost_return_on_error_no_add!( - &cost, + cost, batch .put( &key, @@ -466,7 +466,7 @@ where for (key, value, storage_cost) in aux { match value { Op::Put(value, ..) => cost_return_on_error_no_add!( - &cost, + cost, batch .put_aux(key, value, storage_cost.clone()) .map_err(CostsError) @@ -474,7 +474,7 @@ where Op::Delete => batch.delete_aux(key, storage_cost.clone()), _ => { cost_return_on_error_no_add!( - &cost, + cost, Err(Error::InvalidOperation( "only put and delete allowed for aux storage" )) @@ -795,7 +795,7 @@ mod test { use grovedb_path::SubtreePath; use grovedb_storage::{ - rocksdb_storage::{PrefixedRocksDbStorageContext, RocksDbStorage}, + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, RawIterator, Storage, StorageBatch, StorageContext, }; use grovedb_version::version::GroveVersion; @@ -1027,9 +1027,11 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + 
.get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1053,9 +1055,11 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1074,7 +1078,7 @@ mod test { fn reopen() { let grove_version = GroveVersion::latest(); fn collect( - mut node: RefWalker>, + mut node: RefWalker>, nodes: &mut Vec>, ) { let grove_version = GroveVersion::latest(); @@ -1109,9 +1113,15 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::empty(), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1125,12 +1135,13 @@ mod test { .unwrap(); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); + let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1144,14 +1155,19 @@ mod test { let mut nodes = vec![]; collect(walker, &mut nodes); + + storage.commit_transaction(transaction).unwrap().unwrap(); + nodes }; let storage = 
RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); + let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1169,7 +1185,7 @@ mod test { } type PrefixedStorageIter<'db, 'ctx> = - &'ctx mut as StorageContext<'db>>::RawIterator; + &'ctx mut as StorageContext<'db>>::RawIterator; #[test] fn reopen_iter() { @@ -1189,9 +1205,15 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::empty(), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1205,14 +1227,14 @@ mod test { .unwrap(); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let mut nodes = vec![]; let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1221,13 +1243,18 @@ mod test { .unwrap() .expect("cannot open merk"); collect(&mut merk.storage.raw_iter(), &mut nodes); + + storage.commit_transaction(transaction).unwrap().unwrap(); + nodes }; + let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let transaction = storage.start_transaction(); let merk = Merk::open_base( storage - 
.get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1249,9 +1276,11 @@ mod test { let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -1309,13 +1338,13 @@ mod test { assert_eq!(result, Some(b"b".to_vec())); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, diff --git a/merk/src/merk/open.rs b/merk/src/merk/open.rs index a3d4c16e..5f075157 100644 --- a/merk/src/merk/open.rs +++ b/merk/src/merk/open.rs @@ -113,12 +113,18 @@ mod test { let tmp_dir = TempDir::new().expect("cannot open tempdir"); let storage = RocksDbStorage::default_rocksdb_with_path(tmp_dir.path()) .expect("cannot open rocksdb storage"); + let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let test_prefix = [b"ayy"]; - let batch = StorageBatch::new(); let mut merk = Merk::open_base( storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), Some(&batch)) + .get_transactional_storage_context( + SubtreePath::from(test_prefix.as_ref()), + Some(&batch), + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -139,13 
+145,17 @@ mod test { let root_hash = merk.root_hash(); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let merk = Merk::open_base( storage - .get_storage_context(SubtreePath::from(test_prefix.as_ref()), None) + .get_transactional_storage_context( + SubtreePath::from(test_prefix.as_ref()), + None, + &transaction, + ) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -161,10 +171,11 @@ mod test { let grove_version = GroveVersion::latest(); let storage = TempStorage::new(); let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); let merk_fee_context = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(&batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(&batch), &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, @@ -188,13 +199,13 @@ mod test { .expect("apply failed"); storage - .commit_multi_context_batch(batch, None) + .commit_multi_context_batch(batch, Some(&transaction)) .unwrap() .expect("cannot commit batch"); let merk_fee_context = Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, &transaction) .unwrap(), TreeType::NormalTree, None::<&fn(&[u8], &GroveVersion) -> Option>, diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 0c1784fd..02150410 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -555,7 +555,7 @@ mod tests { use grovedb_storage::{ rocksdb_storage::{ test_utils::TempStorage, PrefixedRocksDbImmediateStorageContext, - PrefixedRocksDbStorageContext, + PrefixedRocksDbTransactionContext, }, RawIterator, Storage, }; @@ -581,7 +581,7 @@ mod tests { Op::Push(Node::KV(vec![3], vec![3])), Op::Parent, ]; - assert!(Restorer::::verify_chunk( + assert!(Restorer::::verify_chunk( 
non_avl_tree_proof, &[0; 32], &None @@ -593,7 +593,7 @@ mod tests { fn test_chunk_verification_only_kv_feature_and_hash() { // should not accept kv let invalid_chunk_proof = vec![Op::Push(Node::KV(vec![1], vec![1]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -607,7 +607,7 @@ mod tests { // should not accept kvhash let invalid_chunk_proof = vec![Op::Push(Node::KVHash([0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -621,7 +621,7 @@ mod tests { // should not accept kvdigest let invalid_chunk_proof = vec![Op::Push(Node::KVDigest(vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -635,7 +635,7 @@ mod tests { // should not accept kvvaluehash let invalid_chunk_proof = vec![Op::Push(Node::KVValueHash(vec![0], vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, @@ -649,7 +649,7 @@ mod tests { // should not accept kvrefvaluehash let invalid_chunk_proof = vec![Op::Push(Node::KVRefValueHash(vec![0], vec![0], [0; 32]))]; - let verification_result = Restorer::::verify_chunk( + let verification_result = Restorer::::verify_chunk( invalid_chunk_proof, &[0; 32], &None, diff --git a/merk/src/merk/source.rs b/merk/src/merk/source.rs index 7c7568be..99a1a707 100644 --- a/merk/src/merk/source.rs +++ b/merk/src/merk/source.rs @@ -26,7 +26,7 @@ pub struct MerkSource<'s, S> { tree_type: TreeType, } -impl<'s, S> Clone for MerkSource<'s, S> { +impl Clone for MerkSource<'_, S> { fn clone(&self) -> Self { MerkSource { storage: self.storage, @@ -35,7 +35,7 @@ impl<'s, S> Clone for MerkSource<'s, S> { } } -impl<'s, 'db, S> Fetch for MerkSource<'s, 
S> +impl<'db, S> Fetch for MerkSource<'_, S> where S: StorageContext<'db>, { diff --git a/merk/src/merk/tree_type.rs b/merk/src/merk/tree_type.rs new file mode 100644 index 00000000..ef845f21 --- /dev/null +++ b/merk/src/merk/tree_type.rs @@ -0,0 +1,78 @@ +use std::fmt; + +use crate::{merk::NodeType, Error, TreeFeatureType}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum MaybeTree { + Tree(TreeType), + NotTree, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum TreeType { + NormalTree = 0, + SumTree = 1, + BigSumTree = 2, + CountTree = 3, + CountSumTree = 4, +} + +impl TryFrom for TreeType { + type Error = Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(TreeType::NormalTree), + 1 => Ok(TreeType::SumTree), + 2 => Ok(TreeType::BigSumTree), + 3 => Ok(TreeType::CountTree), + 4 => Ok(TreeType::CountSumTree), + n => Err(Error::UnknownTreeType(format!("got {}, max is 4", n))), // Error handling + } + } +} + +impl fmt::Display for TreeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match *self { + TreeType::NormalTree => "Normal Tree", + TreeType::SumTree => "Sum Tree", + TreeType::BigSumTree => "Big Sum Tree", + TreeType::CountTree => "Count Tree", + TreeType::CountSumTree => "Count Sum Tree", + }; + write!(f, "{}", s) + } +} + +impl TreeType { + pub fn allows_sum_item(&self) -> bool { + match self { + TreeType::NormalTree => false, + TreeType::SumTree => true, + TreeType::BigSumTree => true, + TreeType::CountTree => false, + TreeType::CountSumTree => true, + } + } + + pub const fn inner_node_type(&self) -> NodeType { + match self { + TreeType::NormalTree => NodeType::NormalNode, + TreeType::SumTree => NodeType::SumNode, + TreeType::BigSumTree => NodeType::BigSumNode, + TreeType::CountTree => NodeType::CountNode, + TreeType::CountSumTree => NodeType::CountSumNode, + } + } + + pub fn empty_tree_feature_type(&self) -> TreeFeatureType { + match self { + TreeType::NormalTree => 
TreeFeatureType::BasicMerkNode, + TreeType::SumTree => TreeFeatureType::SummedMerkNode(0), + TreeType::BigSumTree => TreeFeatureType::BigSummedMerkNode(0), + TreeType::CountTree => TreeFeatureType::CountedMerkNode(0), + TreeType::CountSumTree => TreeFeatureType::CountedSummedMerkNode(0, 0), + } + } +} diff --git a/merk/src/proofs/chunk/chunk.rs b/merk/src/proofs/chunk/chunk.rs index 4960c53f..f40c761c 100644 --- a/merk/src/proofs/chunk/chunk.rs +++ b/merk/src/proofs/chunk/chunk.rs @@ -40,7 +40,7 @@ use crate::{ pub const LEFT: bool = true; pub const RIGHT: bool = false; -impl<'a, S> RefWalker<'a, S> +impl RefWalker<'_, S> where S: Fetch + Sized + Clone, { @@ -214,7 +214,7 @@ pub fn verify_height_proof(proof: Vec, expected_root_hash: CryptoHash) -> Re // TODO: add documentation pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { - return Ok(match height_proof_tree.child(LEFT) { + Ok(match height_proof_tree.child(LEFT) { Some(child) => { if !matches!(child.tree.node, Node::KVHash(..)) { // todo deal with old chunk restoring error @@ -225,7 +225,7 @@ pub fn verify_height_tree(height_proof_tree: &Tree) -> Result { verify_height_tree(&child.tree)? + 1 } None => 1, - }); + }) } #[cfg(test)] diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index 5996e388..745c5d1c 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -414,7 +414,7 @@ impl<'a> Decoder<'a> { } #[cfg(any(feature = "minimal", feature = "verify"))] -impl<'a> Iterator for Decoder<'a> { +impl Iterator for Decoder<'_> { type Item = Result; fn next(&mut self) -> Option { diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 8c3b5652..d5331548 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -107,7 +107,7 @@ impl Map { /// of keys. If during iteration we encounter a gap in the data (e.g. the /// proof did not include all nodes within the range), the iterator will /// yield an error. 
- pub fn range<'a, R: RangeBounds<&'a [u8]>>(&'a self, bounds: R) -> Range { + pub fn range<'a, R: RangeBounds<&'a [u8]>>(&'a self, bounds: R) -> Range<'a> { let start_key = bound_to_inner(bounds.start_bound()).map(|x| (*x).into()); let bounds = bounds_to_vec(bounds); @@ -159,7 +159,7 @@ pub struct Range<'a> { } #[cfg(feature = "minimal")] -impl<'a> Range<'a> { +impl Range<'_> { /// Returns an error if the proof does not properly prove the end of the /// range. fn check_end_bound(&self) -> Result<(), Error> { diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index bd33d4b0..b6e9c0c6 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -702,7 +702,7 @@ impl Link { } #[cfg(feature = "minimal")] -impl<'a, S> RefWalker<'a, S> +impl RefWalker<'_, S> where S: Fetch + Sized + Clone, { diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index dafd09aa..8a378bd1 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -374,11 +374,11 @@ where } for op in ops { - match cost_return_on_error_no_add!(&cost, op) { + match cost_return_on_error_no_add!(cost, op) { Op::Parent => { let (mut parent, child) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -395,8 +395,8 @@ where } Op::Child => { let (child, mut parent) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -413,8 +413,8 @@ where } Op::ParentInverted => { let (mut parent, child) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut 
stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -431,8 +431,8 @@ where } Op::ChildInverted => { let (child, mut parent) = ( - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), - cost_return_on_error_no_add!(&cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), + cost_return_on_error_no_add!(cost, try_pop(&mut stack)), ); cost_return_on_error!( &mut cost, @@ -465,7 +465,7 @@ where maybe_last_key = Some(key.clone()); } - cost_return_on_error_no_add!(&cost, visit_node(&node)); + cost_return_on_error_no_add!(cost, visit_node(&node)); let tree: Tree = node.into(); stack.push(tree); @@ -488,7 +488,7 @@ where maybe_last_key = Some(key.clone()); } - cost_return_on_error_no_add!(&cost, visit_node(&node)); + cost_return_on_error_no_add!(cost, visit_node(&node)); let tree: Tree = node.into(); stack.push(tree); diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 76eec948..8fa8f704 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -311,15 +311,16 @@ pub fn make_tree_seq_with_start_key( /// Shortcut to open a Merk with a provided storage and batch pub fn empty_path_merk<'db, S>( storage: &'db S, + transaction: &'db >::Transaction, batch: &'db StorageBatch, grove_version: &GroveVersion, -) -> Merk<>::BatchStorageContext> +) -> Merk<>::BatchTransactionalStorageContext> where S: Storage<'db>, { Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), Some(batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(batch), transaction) .unwrap(), TreeType::NormalTree, None:: Option>, @@ -332,14 +333,15 @@ where /// Shortcut to open a Merk for read only pub fn empty_path_merk_read_only<'db, S>( storage: &'db S, + transaction: &'db >::Transaction, grove_version: &GroveVersion, -) -> Merk<>::BatchStorageContext> +) -> 
Merk<>::BatchTransactionalStorageContext> where S: Storage<'db>, { Merk::open_base( storage - .get_storage_context(SubtreePath::empty(), None) + .get_transactional_storage_context(SubtreePath::empty(), None, transaction) .unwrap(), TreeType::NormalTree, None:: Option>, diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index a9b3b26e..dafd81d0 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -28,40 +28,42 @@ //! Temp merk test utils -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] use std::ops::{Deref, DerefMut}; use grovedb_path::SubtreePath; -use grovedb_storage::StorageBatch; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] +use grovedb_storage::{rocksdb_storage::test_utils::TempStorage, Storage}; use grovedb_storage::{ - rocksdb_storage::{test_utils::TempStorage, PrefixedRocksDbStorageContext}, - Storage, + rocksdb_storage::{PrefixedRocksDbTransactionContext, RocksDbStorage}, + StorageBatch, }; use grovedb_version::version::GroveVersion; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] use crate::Merk; -use crate::{tree::kv::ValueDefinedCostType, tree_type::TreeType}; +use crate::{tree::kv::ValueDefinedCostType, TreeType}; -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] /// Wraps a Merk instance and deletes it from disk it once it goes out of scope. pub struct TempMerk { storage: &'static TempStorage, batch: &'static StorageBatch, - merk: Merk>, + merk: Merk>, + tx: &'static >::Transaction, } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl TempMerk { /// Opens a `TempMerk` at the given file path, creating a new one if it /// does not exist. 
pub fn new(grove_version: &GroveVersion) -> Self { let storage = Box::leak(Box::new(TempStorage::new())); let batch = Box::leak(Box::new(StorageBatch::new())); + let tx = Box::leak(Box::new(storage.start_transaction())); let context = storage - .get_storage_context(SubtreePath::empty(), Some(batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(batch), tx) .unwrap(); let merk = Merk::open_base( @@ -76,20 +78,32 @@ impl TempMerk { storage, merk, batch, + tx, } } /// Commits pending batch operations. pub fn commit(&mut self, grove_version: &GroveVersion) { - let batch = unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; + let batch: Box = + unsafe { Box::from_raw(self.batch as *const _ as *mut StorageBatch) }; + let tx: Box<>::Transaction> = unsafe { + Box::from_raw( + self.tx as *const _ as *mut >::Transaction, + ) + }; self.storage - .commit_multi_context_batch(*batch, None) + .commit_multi_context_batch(*batch, Some(self.tx)) .unwrap() .expect("unable to commit batch"); + self.storage + .commit_transaction(*tx) + .unwrap() + .expect("unable to commit transaction"); self.batch = Box::leak(Box::new(StorageBatch::new())); + self.tx = Box::leak(Box::new(self.storage.start_transaction())); let context = self .storage - .get_storage_context(SubtreePath::empty(), Some(self.batch)) + .get_transactional_storage_context(SubtreePath::empty(), Some(self.batch), self.tx) .unwrap(); self.merk = Merk::open_base( context, @@ -102,36 +116,42 @@ impl TempMerk { } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Drop for TempMerk { fn drop(&mut self) { unsafe { let batch = Box::from_raw(self.batch as *const _ as *mut StorageBatch); - let _ = self.storage.commit_multi_context_batch(*batch, None); + + let tx: Box<>::Transaction> = Box::from_raw( + self.tx as *const _ as *mut >::Transaction, + ); + + let _ = self.storage.commit_multi_context_batch(*batch, Some(&tx)); + let _ = self.storage.commit_transaction(*tx).unwrap(); 
drop(Box::from_raw(self.storage as *const _ as *mut TempStorage)); } } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Default for TempMerk { fn default() -> Self { Self::new(GroveVersion::latest()) } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl Deref for TempMerk { - type Target = Merk>; + type Target = Merk>; - fn deref(&self) -> &Merk> { + fn deref(&self) -> &Merk> { &self.merk } } -#[cfg(feature = "minimal")] +#[cfg(feature = "full")] impl DerefMut for TempMerk { - fn deref_mut(&mut self) -> &mut Merk> { + fn deref_mut(&mut self) -> &mut Merk> { &mut self.merk } } diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 2ef07cf5..176f8c3b 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -52,7 +52,7 @@ impl TreeNode { let tree_bytes = cost_return_on_error!(&mut cost, storage.get(&key).map_err(StorageError)); let tree_opt = cost_return_on_error_no_add!( - &cost, + cost, tree_bytes .map(|x| TreeNode::decode_raw( &x, diff --git a/merk/src/tree/iter.rs b/merk/src/tree/iter.rs index 96c04e29..0566d044 100644 --- a/merk/src/tree/iter.rs +++ b/merk/src/tree/iter.rs @@ -65,7 +65,7 @@ impl<'a> TreeNode { } #[cfg(feature = "minimal")] -impl<'a> Iterator for Iter<'a> { +impl Iterator for Iter<'_> { type Item = (Vec, Vec); /// Traverses to and yields the next key/value pair, in key order. 
diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index 460edbce..a1cda4d1 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -459,7 +459,7 @@ impl TreeNode { match link.aggregate_data() { AggregateData::NoAggregateData => 0, AggregateData::Sum(s) => s.encode_var_vec().len() as u32, - AggregateData::BigSum(_) => 16 as u32, + AggregateData::BigSum(_) => 16_u32, AggregateData::Count(c) => c.encode_var_vec().len() as u32, AggregateData::CountAndSum(c, s) => { s.encode_var_vec().len() as u32 + c.encode_var_vec().len() as u32 @@ -809,7 +809,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -865,7 +865,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -919,7 +919,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -981,7 +981,7 @@ impl TreeNode { // in this case there is a possibility that the client would want to update the // element flags based on the change of values cost_return_on_error_no_add!( - &cost, + cost, self.just_in_time_tree_node_value_update( old_specialized_cost, get_temp_new_value_with_old_flags, @@ -1064,7 +1064,7 @@ impl TreeNode { } } - cost_return_on_error_no_add!(&cost, c.write(self, old_specialized_cost,)); + cost_return_on_error_no_add!(cost, c.write(self, old_specialized_cost,)); // println!("done 
committing {}", std::str::from_utf8(self.key()).unwrap()); diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index 2e2cf3fd..6eed0138 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -514,13 +514,13 @@ where Delete => self.tree().inner.kv.value_byte_cost_size(), DeleteLayered | DeleteLayeredMaybeSpecialized => { cost_return_on_error_no_add!( - &cost, + cost, old_specialized_cost(&key_vec, value) ) } DeleteMaybeSpecialized => { cost_return_on_error_no_add!( - &cost, + cost, old_specialized_cost(&key_vec, value) ) } @@ -534,7 +534,7 @@ where prefixed_key_len + prefixed_key_len.required_space() as u32; let value = self.tree().value_ref(); cost_return_on_error_no_add!( - &cost, + cost, section_removal_bytes(value, total_key_len, old_cost) ) }; diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 834643a1..aebae47d 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -230,7 +230,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value( value, @@ -275,7 +275,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_with_fixed_cost( value, @@ -321,7 +321,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_and_reference_value_hash( value, @@ -368,7 +368,7 @@ where ) -> CostResult { let mut cost = OperationCost::default(); cost_return_on_error_no_add!( - &cost, + cost, self.tree.own_result(|t| t .put_value_with_reference_value_hash_and_value_cost( value, diff --git a/merk/src/visualize.rs b/merk/src/visualize.rs index 0235f92d..d3fe17e2 100644 --- a/merk/src/visualize.rs +++ b/merk/src/visualize.rs @@ -65,8 +65,8 @@ impl<'a, F> VisualizableTree<'a, F> { } } -impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: 
Fn(&[u8]) -> T + Copy> Visualize - for VisualizeableMerk<'a, S, F> +impl<'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize + for VisualizeableMerk<'_, S, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"Merk root: ")?; @@ -84,7 +84,7 @@ impl<'a, 'db, S: StorageContext<'db>, T: Visualize, F: Fn(&[u8]) -> T + Copy> Vi } } -impl<'a, T: Visualize, F: Fn(&[u8]) -> T + Copy> Visualize for VisualizableTree<'a, F> { +impl T + Copy> Visualize for VisualizableTree<'_, F> { fn visualize(&self, mut drawer: Drawer) -> Result> { drawer.write(b"[key: ")?; drawer = self.tree.inner.kv.key_as_ref().visualize(drawer)?; diff --git a/path/src/subtree_path.rs b/path/src/subtree_path.rs index ae8cd900..179db8f2 100644 --- a/path/src/subtree_path.rs +++ b/path/src/subtree_path.rs @@ -35,7 +35,8 @@ //! subtree paths and other path references if use as generic [Into]. use std::{ - fmt::{Display, Formatter}, + cmp, + fmt::{self, Display}, hash::{Hash, Hasher}, }; @@ -51,30 +52,30 @@ pub struct SubtreePath<'b, B> { pub(crate) ref_variant: SubtreePathInner<'b, B>, } -fn hex_to_ascii(hex_value: &[u8]) -> String { - // Define the set of allowed characters - const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ +impl> Display for SubtreePath<'_, B> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result { + fn bytes_to_hex_or_ascii(bytes: &[u8]) -> String { + // Define the set of allowed characters + const ALLOWED_CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ abcdefghijklmnopqrstuvwxyz\ 0123456789_-/\\[]@"; - // Check if all characters in hex_value are allowed - if hex_value.iter().all(|&c| ALLOWED_CHARS.contains(&c)) { - // Try to convert to UTF-8 - String::from_utf8(hex_value.to_vec()) - .unwrap_or_else(|_| format!("0x{}", hex::encode(hex_value))) - } else { - // Hex encode and prepend "0x" - format!("0x{}", hex::encode(hex_value)) - } -} + // Check if all characters in hex_value are allowed + if bytes.iter().all(|&c| 
ALLOWED_CHARS.contains(&c)) { + // Try to convert to UTF-8 + String::from_utf8(bytes.to_vec()) + .unwrap_or_else(|_| format!("0x{}", hex::encode(bytes))) + } else { + // Hex encode and prepend "0x" + format!("0x{}", hex::encode(bytes)) + } + } -impl<'b, B: AsRef<[u8]>> Display for SubtreePath<'b, B> { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match &self.ref_variant { SubtreePathInner::Slice(slice) => { let ascii_path = slice .iter() - .map(|e| hex_to_ascii(e.as_ref())) + .map(|e| bytes_to_hex_or_ascii(e.as_ref())) .collect::>() .join("/"); write!(f, "{}", ascii_path) @@ -83,13 +84,17 @@ impl<'b, B: AsRef<[u8]>> Display for SubtreePath<'b, B> { let ascii_path = subtree_path .to_vec() .into_iter() - .map(|a| hex_to_ascii(a.as_slice())) + .map(|a| bytes_to_hex_or_ascii(a.as_slice())) .collect::>() .join("/"); write!(f, "{}", ascii_path) } SubtreePathInner::SubtreePathIter(iter) => { - let ascii_path = iter.clone().map(hex_to_ascii).collect::>().join("/"); + let ascii_path = iter + .clone() + .map(bytes_to_hex_or_ascii) + .collect::>() + .join("/"); write!(f, "{}", ascii_path) } } @@ -114,7 +119,7 @@ pub(crate) enum SubtreePathInner<'b, B> { SubtreePathIter(SubtreePathIter<'b, B>), } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePath<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePath<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -126,7 +131,78 @@ where } } -impl<'b, B: AsRef<[u8]>> Eq for SubtreePath<'b, B> {} +/// First and foremost, the order of subtree paths is dictated by their lengths. +/// Therefore, those subtrees closer to the root will come first. The rest it +/// can guarantee is to be free of false equality; however, seemingly unrelated +/// subtrees can come one after another if they share the same length, which was +/// (not) done for performance reasons. 
+impl<'br, BL, BR> PartialOrd> for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePath<'br, BR>) -> Option { + let iter_a = self.clone().into_reverse_iter(); + let iter_b = other.clone().into_reverse_iter(); + + Some( + iter_a + .len() + .cmp(&iter_b.len()) + .reverse() + .then_with(|| iter_a.cmp(iter_b)), + ) + } +} + +impl<'br, BL, BR> PartialOrd> for SubtreePathBuilder<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePathBuilder<'br, BR>) -> Option { + let iter_a = self.reverse_iter(); + let iter_b = other.reverse_iter(); + + Some( + iter_a + .len() + .cmp(&iter_b.len()) + .reverse() + .then_with(|| iter_a.cmp(iter_b)), + ) + } +} + +impl<'br, BL, BR> PartialOrd> for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, + BR: AsRef<[u8]>, +{ + fn partial_cmp(&self, other: &SubtreePathBuilder<'br, BR>) -> Option { + self.partial_cmp(&SubtreePath::from(other)) + } +} + +impl Ord for SubtreePath<'_, BL> +where + BL: AsRef<[u8]>, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.partial_cmp(other).expect("order is totally defined") + } +} + +impl Ord for SubtreePathBuilder<'_, BL> +where + BL: AsRef<[u8]>, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.partial_cmp(other).expect("order is totally defined") + } +} + +impl> Eq for SubtreePath<'_, B> {} impl<'b, B> From> for SubtreePath<'b, B> { fn from(ref_variant: SubtreePathInner<'b, B>) -> Self { @@ -156,7 +232,7 @@ impl<'s, 'b, B> From<&'s SubtreePathBuilder<'b, B>> for SubtreePath<'s, B> { /// Hash order is the same as iteration order: from most deep path segment up to /// root. -impl<'b, B: AsRef<[u8]>> Hash for SubtreePath<'b, B> { +impl> Hash for SubtreePath<'_, B> { fn hash(&self, state: &mut H) { match &self.ref_variant { SubtreePathInner::Slice(slice) => slice @@ -222,7 +298,7 @@ impl<'b, B: AsRef<[u8]>> SubtreePath<'b, B> { } /// Get a derived path with a child path segment added. 
- pub fn derive_owned_with_child<'s, S>(&'b self, segment: S) -> SubtreePathBuilder<'b, B> + pub fn derive_owned_with_child<'s, S>(&self, segment: S) -> SubtreePathBuilder<'b, B> where S: Into>, 's: 'b, @@ -322,4 +398,29 @@ mod tests { assert_eq!(as_vec, reference_vec); assert_eq!(parent.len(), reference_vec.len()); } + + #[test] + fn ordering() { + let path_a: SubtreePath<_> = (&[b"one" as &[u8], b"two", b"three"]).into(); + let path_b = path_a.derive_owned_with_child(b"four"); + let path_c = path_a.derive_owned_with_child(b"notfour"); + let (path_d_parent, _) = path_a.derive_parent().unwrap(); + let path_d = path_d_parent.derive_owned_with_child(b"three"); + + // Same lengths for different paths don't make them equal: + assert!(!matches!( + SubtreePath::from(&path_b).cmp(&SubtreePath::from(&path_c)), + cmp::Ordering::Equal + )); + + // Equal paths made the same way are equal: + assert!(matches!( + path_a.cmp(&SubtreePath::from(&path_d)), + cmp::Ordering::Equal + )); + + // Longer paths come first + assert!(path_a > path_b); + assert!(path_a > path_c); + } } diff --git a/path/src/subtree_path_builder.rs b/path/src/subtree_path_builder.rs index 4ef25f0a..d834a5f3 100644 --- a/path/src/subtree_path_builder.rs +++ b/path/src/subtree_path_builder.rs @@ -46,16 +46,25 @@ pub struct SubtreePathBuilder<'b, B> { pub(crate) relative: SubtreePathRelative<'b>, } +impl Clone for SubtreePathBuilder<'_, B> { + fn clone(&self) -> Self { + SubtreePathBuilder { + base: self.base.clone(), + relative: self.relative.clone(), + } + } +} + /// Hash order is the same as iteration order: from most deep path segment up to /// root. 
-impl<'b, B: AsRef<[u8]>> Hash for SubtreePathBuilder<'b, B> { +impl> Hash for SubtreePathBuilder<'_, B> { fn hash(&self, state: &mut H) { self.relative.hash(state); self.base.hash(state); } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePathBuilder<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePathBuilder<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -65,7 +74,7 @@ where } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePath<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePath<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -75,7 +84,7 @@ where } } -impl<'bl, 'br, BL, BR> PartialEq> for SubtreePathBuilder<'bl, BL> +impl<'br, BL, BR> PartialEq> for SubtreePathBuilder<'_, BL> where BL: AsRef<[u8]>, BR: AsRef<[u8]>, @@ -85,7 +94,7 @@ where } } -impl<'b, B: AsRef<[u8]>> Eq for SubtreePathBuilder<'b, B> {} +impl> Eq for SubtreePathBuilder<'_, B> {} impl<'s, 'b, B> From<&'s SubtreePath<'b, B>> for SubtreePathBuilder<'b, B> { fn from(value: &'s SubtreePath<'b, B>) -> Self { @@ -97,7 +106,7 @@ impl<'s, 'b, B> From<&'s SubtreePath<'b, B>> for SubtreePathBuilder<'b, B> { } /// Derived subtree path on top of base path. -#[derive(Debug)] +#[derive(Debug, Clone)] pub(crate) enum SubtreePathRelative<'r> { /// Equivalent to the base path. Empty, @@ -149,6 +158,28 @@ impl Default for SubtreePathBuilder<'static, [u8; 0]> { } } +impl SubtreePathBuilder<'_, B> { + /// Makes an owned `SubtreePathBuilder` out of iterator. + pub fn owned_from_iter>(iter: impl IntoIterator) -> Self { + let bytes = iter.into_iter().fold(CompactBytes::new(), |mut bytes, s| { + bytes.add_segment(s.as_ref()); + bytes + }); + + SubtreePathBuilder { + base: SubtreePath { + ref_variant: SubtreePathInner::Slice(&[]), + }, + relative: SubtreePathRelative::Multi(bytes), + } + } + + /// Create an owned version of `SubtreePathBuilder` from `SubtreePath`. 
+ pub fn owned_from_path>(path: SubtreePath) -> Self { + Self::owned_from_iter(path.to_vec()) + } +} + impl SubtreePathBuilder<'_, B> { /// Returns the length of the subtree path. pub fn len(&self) -> usize { @@ -159,6 +190,24 @@ impl SubtreePathBuilder<'_, B> { pub fn is_empty(&self) -> bool { self.base.is_empty() && self.relative.is_empty() } + + /// Adds path segment in place. + pub fn push_segment(&mut self, segment: &[u8]) { + match &mut self.relative { + SubtreePathRelative::Empty => { + let mut bytes = CompactBytes::new(); + bytes.add_segment(segment); + self.relative = SubtreePathRelative::Multi(bytes); + } + SubtreePathRelative::Single(old_segment) => { + let mut bytes = CompactBytes::new(); + bytes.add_segment(old_segment); + bytes.add_segment(segment); + self.relative = SubtreePathRelative::Multi(bytes); + } + SubtreePathRelative::Multi(bytes) => bytes.add_segment(segment), + } + } } impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { @@ -191,6 +240,38 @@ impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { } } + /// Get a derived path for a parent and a chopped segment. The lifetime of + /// returned path is constrained solely by the original slice that this + /// whole path hierarchy is based upon, and the point of derivation has + /// no effect on it. 
+ pub fn derive_parent_owned(&self) -> Option<(SubtreePathBuilder<'b, B>, Vec)> { + match &self.relative { + SubtreePathRelative::Empty => self + .base + .derive_parent() + .map(|(path, key)| (path.derive_owned(), key.to_vec())), + SubtreePathRelative::Single(relative) => { + Some((self.base.derive_owned(), relative.to_vec())) + } + SubtreePathRelative::Multi(bytes) => { + let mut new_bytes = bytes.clone(); + if let Some(key) = new_bytes.pop_segment() { + Some(( + SubtreePathBuilder { + base: self.base.clone(), + relative: SubtreePathRelative::Multi(new_bytes), + }, + key, + )) + } else { + self.base + .derive_parent() + .map(|(path, key)| (path.derive_owned(), key.to_vec())) + } + } + } + } + /// Get a derived path with a child path segment added. pub fn derive_owned_with_child<'s, S>(&'b self, segment: S) -> SubtreePathBuilder<'b, B> where @@ -203,24 +284,6 @@ impl<'b, B: AsRef<[u8]>> SubtreePathBuilder<'b, B> { } } - /// Adds path segment in place. - pub fn push_segment(&mut self, segment: &[u8]) { - match &mut self.relative { - SubtreePathRelative::Empty => { - let mut bytes = CompactBytes::new(); - bytes.add_segment(segment); - self.relative = SubtreePathRelative::Multi(bytes); - } - SubtreePathRelative::Single(old_segment) => { - let mut bytes = CompactBytes::new(); - bytes.add_segment(old_segment); - bytes.add_segment(segment); - self.relative = SubtreePathRelative::Multi(bytes); - } - SubtreePathRelative::Multi(bytes) => bytes.add_segment(segment), - } - } - /// Returns an iterator for the subtree path by path segments. 
pub fn reverse_iter(&'b self) -> SubtreePathIter<'b, B> { match &self.relative { diff --git a/path/src/subtree_path_iter.rs b/path/src/subtree_path_iter.rs index 2ca65866..f5e2aeaa 100644 --- a/path/src/subtree_path_iter.rs +++ b/path/src/subtree_path_iter.rs @@ -42,7 +42,7 @@ pub struct SubtreePathIter<'b, B> { next_subtree_path: Option<&'b SubtreePath<'b, B>>, } -impl<'b, B> Clone for SubtreePathIter<'b, B> { +impl Clone for SubtreePathIter<'_, B> { fn clone(&self) -> Self { SubtreePathIter { current_iter: self.current_iter.clone(), @@ -147,12 +147,12 @@ impl CurrentSubtreePathIter<'_, B> { } } -impl<'b, B> Clone for CurrentSubtreePathIter<'b, B> { +impl Clone for CurrentSubtreePathIter<'_, B> { fn clone(&self) -> Self { match self { CurrentSubtreePathIter::Single(x) => CurrentSubtreePathIter::Single(x), CurrentSubtreePathIter::Slice(x) => CurrentSubtreePathIter::Slice(x.clone()), - CurrentSubtreePathIter::OwnedBytes(x) => CurrentSubtreePathIter::OwnedBytes(x.clone()), + CurrentSubtreePathIter::OwnedBytes(x) => CurrentSubtreePathIter::OwnedBytes(*x), } } } diff --git a/path/src/util/compact_bytes.rs b/path/src/util/compact_bytes.rs index c44b6dd9..e2093240 100644 --- a/path/src/util/compact_bytes.rs +++ b/path/src/util/compact_bytes.rs @@ -31,7 +31,7 @@ use std::mem; /// Bytes vector wrapper to have multiple byte arrays allocated continuosuly. -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub(crate) struct CompactBytes { n_segments: usize, data: Vec, @@ -64,6 +64,29 @@ impl CompactBytes { pub fn len(&self) -> usize { self.n_segments } + + pub fn pop_segment(&mut self) -> Option> { + if self.n_segments < 1 { + return None; + } + + let length_size = mem::size_of::(); + let last_segment_length = usize::from_ne_bytes( + self.data[self.data.len() - length_size..] 
+ .try_into() + .expect("internal structure bug"), + ); + + let segment = self.data + [self.data.len() - last_segment_length - length_size..self.data.len() - length_size] + .to_vec(); + + self.data + .truncate(self.data.len() - last_segment_length - length_size); + self.n_segments -= 1; + + Some(segment) + } } #[derive(Debug, Clone, Copy)] @@ -160,4 +183,25 @@ mod tests { assert_eq!(iter.next(), None); assert_eq!(iter.next(), None); } + + #[test] + fn pop_segment() { + let mut bytes = CompactBytes::default(); + bytes.add_segment(b"ayya"); + bytes.add_segment(b"ayyb"); + bytes.add_segment(b"ayyc"); + bytes.add_segment(b"ayyd"); + + assert_eq!(bytes.pop_segment(), Some(b"ayyd".to_vec())); + assert_eq!(bytes.pop_segment(), Some(b"ayyc".to_vec())); + + let mut v: Vec<_> = bytes.reverse_iter().collect(); + v.reverse(); + assert_eq!(v, vec![b"ayya".to_vec(), b"ayyb".to_vec()]); + + assert_eq!(bytes.pop_segment(), Some(b"ayyb".to_vec())); + assert_eq!(bytes.pop_segment(), Some(b"ayya".to_vec())); + assert_eq!(bytes.pop_segment(), None); + assert_eq!(bytes.pop_segment(), None); + } } diff --git a/path/src/util/cow_like.rs b/path/src/util/cow_like.rs index 78608ec8..02a53537 100644 --- a/path/src/util/cow_like.rs +++ b/path/src/util/cow_like.rs @@ -35,7 +35,7 @@ use std::{ /// A smart pointer that follows the semantics of [Cow](std::borrow::Cow) except /// provides no means for mutability and thus doesn't require [Clone]. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub enum CowLike<'b> { Owned(Vec), Borrowed(&'b [u8]), diff --git a/storage/src/rocksdb_storage.rs b/storage/src/rocksdb_storage.rs index 14c4df5a..2905adce 100644 --- a/storage/src/rocksdb_storage.rs +++ b/storage/src/rocksdb_storage.rs @@ -36,7 +36,7 @@ mod tests; pub use rocksdb::{Error, WriteBatchWithTransaction}; pub use storage_context::{ PrefixedRocksDbBatch, PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbRawIterator, - PrefixedRocksDbStorageContext, PrefixedRocksDbTransactionContext, + PrefixedRocksDbTransactionContext, }; pub use self::storage::RocksDbStorage; diff --git a/storage/src/rocksdb_storage/storage.rs b/storage/src/rocksdb_storage/storage.rs index f43c05e2..9a3d66ec 100644 --- a/storage/src/rocksdb_storage/storage.rs +++ b/storage/src/rocksdb_storage/storage.rs @@ -44,10 +44,7 @@ use rocksdb::{ Transaction, WriteBatchWithTransaction, DEFAULT_COLUMN_FAMILY_NAME, }; -use super::{ - PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbStorageContext, - PrefixedRocksDbTransactionContext, -}; +use super::{PrefixedRocksDbImmediateStorageContext, PrefixedRocksDbTransactionContext}; use crate::{ error, error::Error::{CostError, RocksDBError}, @@ -190,7 +187,7 @@ impl RocksDbStorage { db_batch.put(&key, &value); cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -209,7 +206,7 @@ impl RocksDbStorage { db_batch.put_cf(cf_aux(&self.db), &key, &value); cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -230,7 +227,7 @@ impl RocksDbStorage { // We only add costs for put root if they are set, otherwise it is free if cost_info.is_some() { cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -250,7 +247,7 @@ impl RocksDbStorage { db_batch.put_cf(cf_meta(&self.db), &key, &value); 
cost.seek_count += 1; cost_return_on_error_no_add!( - &cost, + cost, pending_costs .add_key_value_storage_costs( key.len() as u32, @@ -274,7 +271,7 @@ impl RocksDbStorage { cost.seek_count += 2; // lets get the values let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db.get(&key).map_err(RocksDBError) ) .map(|x| x.len() as u32) @@ -301,7 +298,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db.get_cf(cf_aux(&self.db), &key).map_err(RocksDBError) ) .map(|x| x.len() as u32) @@ -329,7 +326,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db .get_cf(cf_roots(&self.db), &key) .map_err(RocksDBError) @@ -359,7 +356,7 @@ impl RocksDbStorage { } else { cost.seek_count += 2; let value_len = cost_return_on_error_no_add!( - &cost, + cost, self.db .get_cf(cf_meta(&self.db), &key) .map_err(RocksDBError) @@ -434,7 +431,6 @@ impl RocksDbStorage { } impl<'db> Storage<'db> for RocksDbStorage { - type BatchStorageContext = PrefixedRocksDbStorageContext<'db>; type BatchTransactionalStorageContext = PrefixedRocksDbTransactionContext<'db>; type ImmediateStorageContext = PrefixedRocksDbImmediateStorageContext<'db>; type Transaction = Tx<'db>; @@ -459,27 +455,6 @@ impl<'db> Storage<'db> for RocksDbStorage { self.db.flush().map_err(RocksDBError) } - fn get_storage_context<'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - ) -> CostContext - where - B: AsRef<[u8]> + 'b, - { - Self::build_prefix(path) - .map(|prefix| PrefixedRocksDbStorageContext::new(&self.db, prefix, batch)) - } - - fn get_storage_context_by_subtree_prefix( - &'db self, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, - ) -> CostContext { - PrefixedRocksDbStorageContext::new(&self.db, prefix, batch) - .wrap_with_cost(OperationCost::default()) - } - fn get_transactional_storage_context<'b, B>( &'db self, 
path: SubtreePath<'b, B>, @@ -621,11 +596,13 @@ mod tests { }; let batch = StorageBatch::new(); + let transaction = storage.start_transaction(); + let left = storage - .get_storage_context(left_path.clone(), Some(&batch)) + .get_transactional_storage_context(left_path.clone(), Some(&batch), &transaction) .unwrap(); let right = storage - .get_storage_context(right_path.clone(), Some(&batch)) + .get_transactional_storage_context(right_path.clone(), Some(&batch), &transaction) .unwrap(); left.put(b"a", b"a", None, None).unwrap().unwrap(); @@ -643,10 +620,10 @@ mod tests { let batch = StorageBatch::new(); let left = storage - .get_storage_context(left_path.clone(), Some(&batch)) + .get_transactional_storage_context(left_path.clone(), Some(&batch), &transaction) .unwrap(); let right = storage - .get_storage_context(right_path.clone(), Some(&batch)) + .get_transactional_storage_context(right_path.clone(), Some(&batch), &transaction) .unwrap(); // Iterate over left subtree while right subtree contains 1 byte keys: @@ -687,7 +664,9 @@ mod tests { .unwrap() .expect("cannot commit batch"); - let left = storage.get_storage_context(left_path, None).unwrap(); + let left = storage + .get_transactional_storage_context(left_path, None, &transaction) + .unwrap(); // Iterate over left subtree once again let mut iteration_cost_after = OperationCost::default(); let mut iter = left.raw_iter(); diff --git a/storage/src/rocksdb_storage/storage_context.rs b/storage/src/rocksdb_storage/storage_context.rs index 0611d51c..758ba16f 100644 --- a/storage/src/rocksdb_storage/storage_context.rs +++ b/storage/src/rocksdb_storage/storage_context.rs @@ -30,13 +30,11 @@ mod batch; pub mod context_immediate; -mod context_no_tx; mod context_tx; mod raw_iterator; pub use batch::PrefixedRocksDbBatch; pub use context_immediate::PrefixedRocksDbImmediateStorageContext; -pub use context_no_tx::PrefixedRocksDbStorageContext; pub use context_tx::PrefixedRocksDbTransactionContext; pub use 
raw_iterator::PrefixedRocksDbRawIterator; diff --git a/storage/src/rocksdb_storage/storage_context/batch.rs b/storage/src/rocksdb_storage/storage_context/batch.rs index bcf58372..29ae31ca 100644 --- a/storage/src/rocksdb_storage/storage_context/batch.rs +++ b/storage/src/rocksdb_storage/storage_context/batch.rs @@ -33,7 +33,7 @@ pub struct PrefixedMultiContextBatchPart { } /// Implementation of a batch outside a transaction -impl<'db> Batch for PrefixedRocksDbBatch<'db> { +impl Batch for PrefixedRocksDbBatch<'_> { fn put>( &mut self, key: K, diff --git a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs b/storage/src/rocksdb_storage/storage_context/context_no_tx.rs deleted file mode 100644 index 80ad0149..00000000 --- a/storage/src/rocksdb_storage/storage_context/context_no_tx.rs +++ /dev/null @@ -1,286 +0,0 @@ -// MIT LICENSE -// -// Copyright (c) 2021 Dash Core Group -// -// Permission is hereby granted, free of charge, to any -// person obtaining a copy of this software and associated -// documentation files (the "Software"), to deal in the -// Software without restriction, including without -// limitation the rights to use, copy, modify, merge, -// publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software -// is furnished to do so, subject to the following -// conditions: -// -// The above copyright notice and this permission notice -// shall be included in all copies or substantial portions -// of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -// PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Storage context batch implementation without a transaction - -use error::Error; -use grovedb_costs::{ - storage_cost::key_value_cost::KeyValueStorageCost, ChildrenSizesWithIsSumTree, CostResult, - CostsExt, OperationCost, -}; -use rocksdb::{ColumnFamily, DBRawIteratorWithThreadMode}; - -use super::{batch::PrefixedMultiContextBatchPart, make_prefixed_key, PrefixedRocksDbRawIterator}; -use crate::{ - error, - error::Error::RocksDBError, - rocksdb_storage::storage::{Db, SubtreePrefix, AUX_CF_NAME, META_CF_NAME, ROOTS_CF_NAME}, - StorageBatch, StorageContext, -}; - -/// Storage context with a prefix applied to be used in a subtree to be used -/// outside of transaction. -pub struct PrefixedRocksDbStorageContext<'db> { - storage: &'db Db, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, -} - -impl<'db> PrefixedRocksDbStorageContext<'db> { - /// Create a new prefixed storage_cost context instance - pub fn new(storage: &'db Db, prefix: SubtreePrefix, batch: Option<&'db StorageBatch>) -> Self { - PrefixedRocksDbStorageContext { - storage, - prefix, - batch, - } - } -} - -impl<'db> PrefixedRocksDbStorageContext<'db> { - /// Get auxiliary data column family - fn cf_aux(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(AUX_CF_NAME) - .expect("aux column family must exist") - } - - /// Get trees roots data column family - fn cf_roots(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(ROOTS_CF_NAME) - .expect("roots column family must exist") - } - - /// Get metadata column family - fn cf_meta(&self) -> &'db ColumnFamily { - self.storage - .cf_handle(META_CF_NAME) - .expect("meta column family must exist") - } -} - -impl<'db> StorageContext<'db> for 
PrefixedRocksDbStorageContext<'db> { - type Batch = PrefixedMultiContextBatchPart; - type RawIterator = PrefixedRocksDbRawIterator>; - - fn put>( - &self, - key: K, - value: &[u8], - children_sizes: ChildrenSizesWithIsSumTree, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - children_sizes, - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_aux>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_aux( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_root>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_root( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn put_meta>( - &self, - key: K, - value: &[u8], - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.put_meta( - make_prefixed_key(&self.prefix, key), - value.to_vec(), - cost_info, - ); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_aux>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_aux(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_root>( - &self, - 
key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_root(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn delete_meta>( - &self, - key: K, - cost_info: Option, - ) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.delete_meta(make_prefixed_key(&self.prefix, key), cost_info); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn get>(&self, key: K) -> CostResult>, Error> { - self.storage - .get(make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_aux>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_aux(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_root>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_roots(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn get_meta>(&self, key: K) -> CostResult>, Error> { - self.storage - .get_cf(self.cf_meta(), make_prefixed_key(&self.prefix, key)) - .map_err(RocksDBError) - .wrap_fn_cost(|value| OperationCost { - seek_count: 1, - storage_loaded_bytes: value - .as_ref() - .ok() - .and_then(Option::as_ref) - .map(|x| x.len() as u64) - .unwrap_or(0), - ..Default::default() - }) - } - - fn 
new_batch(&self) -> Self::Batch { - PrefixedMultiContextBatchPart { - prefix: self.prefix, - batch: StorageBatch::new(), - } - } - - fn commit_batch(&self, batch: Self::Batch) -> CostResult<(), Error> { - if let Some(existing_batch) = self.batch { - existing_batch.merge(batch.batch); - } - Ok(()).wrap_with_cost(OperationCost::default()) - } - - fn raw_iter(&self) -> Self::RawIterator { - PrefixedRocksDbRawIterator { - prefix: self.prefix, - raw_iterator: self.storage.raw_iterator(), - } - } -} diff --git a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs index 7cc2d1fe..58540ede 100644 --- a/storage/src/rocksdb_storage/storage_context/raw_iterator.rs +++ b/storage/src/rocksdb_storage/storage_context/raw_iterator.rs @@ -46,7 +46,7 @@ pub struct PrefixedRocksDbRawIterator { pub(super) raw_iterator: I, } -impl<'a> RawIterator for PrefixedRocksDbRawIterator> { +impl RawIterator for PrefixedRocksDbRawIterator> { fn seek_to_first(&mut self) -> CostContext<()> { self.raw_iterator.seek(self.prefix); ().wrap_with_cost(OperationCost::with_seek_count(1)) @@ -90,10 +90,9 @@ impl<'a> RawIterator for PrefixedRocksDbRawIterator RawIterator for PrefixedRocksDbRawIterator { /// Storage transaction type type Transaction; - /// Storage context type for mutli-tree batch operations - type BatchStorageContext: StorageContext<'db>; - /// Storage context type for multi-tree batch operations inside transaction type BatchTransactionalStorageContext: StorageContext<'db>; @@ -80,24 +77,6 @@ pub trait Storage<'db> { /// Forces data to be written fn flush(&self) -> Result<(), Error>; - /// Make storage context for a subtree with path, keeping all write - /// operations inside a `batch` if provided. 
- fn get_storage_context<'b, B>( - &'db self, - path: SubtreePath<'b, B>, - batch: Option<&'db StorageBatch>, - ) -> CostContext - where - B: AsRef<[u8]> + 'b; - - /// Make storage context for a subtree with prefix, keeping all write - /// operations inside a `batch` if provided. - fn get_storage_context_by_subtree_prefix( - &'db self, - prefix: SubtreePrefix, - batch: Option<&'db StorageBatch>, - ) -> CostContext; - /// Make context for a subtree on transactional data, keeping all write /// operations inside a `batch` if provided. fn get_transactional_storage_context<'b, B>( @@ -344,8 +323,8 @@ impl StorageBatch { } } - #[cfg(test)] - pub(crate) fn len(&self) -> usize { + /// Get batch length + pub fn len(&self) -> usize { let operations = self.operations.borrow(); operations.data.len() + operations.roots.len() @@ -353,6 +332,11 @@ impl StorageBatch { + operations.meta.len() } + /// Batch emptiness predicate + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + /// Add deferred `put` operation pub(crate) fn put( &self,