diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 028acd2..1f2451c 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -70,5 +70,27 @@ jobs: toolchain: ${{ matrix.toolchain }} components: rustfmt, clippy + - name: Setup just + uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 + - name: Run Tests - run: cargo +${{ matrix.toolchain }} test --all-features + run: just test-${{ matrix.toolchain }} + + no-std: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + with: + persist-credentials: false + + - name: Install MSRV Rust toolchain + uses: dtolnay/rust-toolchain@f7ccc83f9ed1e5b9c81d8a67d7ad1a747e22a561 + with: + toolchain: "1.74.0" + # Common bare-metal Cortex-M target (no_std: `core` + `alloc`). + targets: thumbv7em-none-eabi + + - name: Build for no_std target + shell: bash + run: | + cargo +1.74.0 build --lib --target thumbv7em-none-eabi --no-default-features diff --git a/Cargo.toml b/Cargo.toml index 1edd237..3e89ca3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,9 +12,11 @@ rust-version = "1.74.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -bitcoin_hashes = "0.19" -hex-conservative = "1.0.0" +bitcoin_hashes = { version = "0.19", default-features = false } +hex-conservative = { version = "1.0.0", default-features = false } serde = { version = "1.0", features = ["derive"], optional = true } +hashbrown = "0.16.1" +bitcoin-io = { version = "0.3.0", default-features = false, features = ["alloc"] } [dev-dependencies] serde = { version = "1.0", features = ["derive"] } @@ -23,8 +25,9 @@ criterion = { version = ">=0.5.1", features = ["html_reports"] } rand = "0.9.2" [features] +default = ["std"] +std = ["bitcoin_hashes/std", "hex-conservative/std"] with-serde = ["serde"] -default = [] [lints.clippy] use_self = "warn" @@ -40,6 +43,8 @@ name = "proof-update" [[example]] 
name = "custom-hash-type" +# Compile AccumulatorHash with the `std::io::{Read, Write}` trait bounds +required-features = ["std"] [[bench]] name = "accumulator_benchmarks" diff --git a/justfile b/justfile index 879c5db..e75b107 100644 --- a/justfile +++ b/justfile @@ -12,17 +12,32 @@ build: # Check code formatting, compilation and linting check: cargo +nightly fmt --all --check - cargo +nightly check --all-features --all-targets --tests --benches - cargo +nightly clippy --all-features --all-targets --tests --benches -- -D warnings + + cargo +nightly check --no-default-features --all-targets + cargo +nightly check --all-features --all-targets + cargo +nightly clippy --no-default-features --all-targets -- -D warnings + cargo +nightly clippy --all-features --all-targets -- -D warnings + RUSTDOCFLAGS="-D warnings" cargo +nightly doc --no-deps --all-features # Format code fmt: cargo +nightly fmt --all -# Run all tests +# Run all tests with stable and nightly toolchains test: - cargo test --all-features + @just test-stable + @just test-nightly + +# Run all tests with a stable toolchain +test-stable: + cargo +stable test --no-default-features + cargo +stable test --all-features + +# Run all tests with a nightly toolchain +test-nightly: + cargo +nightly test --no-default-features + cargo +nightly test --all-features # Run all tests on MSRV (1.74.0) test-msrv: @@ -33,7 +48,7 @@ test-msrv: cargo update -p serde_json --precise 1.0.81 cargo update -p serde --precise 1.0.160 cargo update -p half --precise 2.4.1 - + + cargo +1.74.0 test --no-default-features cargo +1.74.0 test --all-features rm Cargo.lock - diff --git a/src/accumulator/mem_forest.rs b/src/accumulator/mem_forest.rs index dea4c36..7c536d7 100644 --- a/src/accumulator/mem_forest.rs +++ b/src/accumulator/mem_forest.rs @@ -25,16 +25,14 @@ //! assert_eq!(p.get_roots()[0].get_data(), BitcoinNodeHash::default()); //! 
``` -use std::cell::Cell; -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Debug; -use std::fmt::Display; -use std::fmt::Formatter; -use std::io::Read; -use std::io::Write; -use std::rc::Rc; -use std::rc::Weak; +use alloc::rc::Rc; +use alloc::rc::Weak; +use core::cell::Cell; +use core::cell::RefCell; +use core::fmt; +use core::fmt::Debug; +use core::fmt::Display; +use core::fmt::Formatter; use super::node_hash::AccumulatorHash; use super::node_hash::BitcoinNodeHash; @@ -48,6 +46,7 @@ use super::util::max_position_at_row; use super::util::right_child; use super::util::root_position; use super::util::tree_rows; +use crate::prelude::*; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum NodeType { @@ -84,7 +83,7 @@ impl Node { /// Writes one node to the writer, this method will recursively write all children. /// The primary use of this method is to serialize the accumulator. In this case, /// you should call this method on each root in the forest. - pub fn write_one(&self, writer: &mut W) -> std::io::Result<()> { + pub fn write_one(&self, writer: &mut W) -> io::Result<()> { match self.ty { NodeType::Branch => writer.write_all(&0_u64.to_le_bytes())?, NodeType::Leaf => writer.write_all(&1_u64.to_le_bytes())?, @@ -124,14 +123,12 @@ impl Node { /// you should call this method on each root in the forest, assuming you know how /// many roots there are. 
#[allow(clippy::type_complexity)] - pub fn read_one( - reader: &mut R, - ) -> std::io::Result<(Rc, HashMap>)> { - fn _read_one( + pub fn read_one(reader: &mut R) -> io::Result<(Rc, HashMap>)> { + fn _read_one( ancestor: Option>>, reader: &mut R, index: &mut HashMap>>, - ) -> std::io::Result>> { + ) -> io::Result>> { let mut ty = [0u8; 8]; reader.read_exact(&mut ty)?; let data = Hash::read(reader)?; @@ -253,7 +250,7 @@ impl MemForest { /// vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] /// ); /// ``` - pub fn serialize(&self, mut writer: W) -> std::io::Result<()> { + pub fn serialize(&self, mut writer: W) -> io::Result<()> { writer.write_all(&self.leaves.to_le_bytes())?; writer.write_all(&(self.roots.len() as u64).to_le_bytes())?; @@ -267,17 +264,16 @@ impl MemForest { /// Deserializes a MemForest from a reader. /// # Example /// ``` - /// use std::io::Cursor; - /// /// use rustreexo::accumulator::mem_forest::MemForest; /// use rustreexo::accumulator::node_hash::BitcoinNodeHash; + /// use rustreexo::prelude::io::Cursor; /// let mut serialized = Cursor::new(vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); /// let MemForest = MemForest::::deserialize(&mut serialized).unwrap(); /// assert_eq!(MemForest.leaves, 0); /// assert_eq!(MemForest.get_roots().len(), 0); /// ``` - pub fn deserialize(mut reader: R) -> std::io::Result { - fn read_u64(reader: &mut R) -> std::io::Result { + pub fn deserialize(mut reader: R) -> io::Result { + fn read_u64(reader: &mut R) -> io::Result { let mut buf = [0u8; 8]; reader.read_exact(&mut buf)?; Ok(u64::from_le_bytes(buf)) @@ -675,29 +671,29 @@ impl MemForest { } impl Debug for MemForest { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { write!(f, "{}", self.string()) } } impl Display for MemForest { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { 
write!(f, "{}", self.string()) } } #[cfg(test)] mod test { - use std::convert::TryFrom; - use std::rc::Rc; - use std::str::FromStr; - use std::vec; + use core::convert::TryFrom; + use core::str::FromStr; use bitcoin_hashes::sha256::Hash as Data; use bitcoin_hashes::HashEngine; + use io::Cursor; use serde::Deserialize; use super::MemForest; + use super::*; use crate::accumulator::mem_forest::Node; use crate::accumulator::node_hash::AccumulatorHash; use crate::accumulator::node_hash::BitcoinNodeHash; @@ -926,11 +922,10 @@ mod test { deletion_tests: Vec, } - let contents = std::fs::read_to_string("test_values/test_cases.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/test_cases.json"); - let tests = serde_json::from_str::(contents.as_str()) - .expect("JSON deserialization error"); + let tests = + serde_json::from_str::(contents).expect("JSON deserialization error"); for i in tests.insertion_tests { run_single_addition_case(i); @@ -999,11 +994,10 @@ mod test { let mut p = MemForest::new(); p.modify(&hashes, &[]).expect("Test mem_forests are valid"); p.modify(&[], &[hashes[0]]).expect("can remove 0"); - let mut writer = std::io::Cursor::new(Vec::new()); + let mut writer = Vec::new(); p.get_roots()[0].write_one(&mut writer).unwrap(); let (deserialized, _) = - Node::::read_one(&mut std::io::Cursor::new(writer.into_inner())) - .unwrap(); + Node::::read_one(&mut Cursor::new(writer)).unwrap(); assert_eq!(deserialized.get_data(), p.get_roots()[0].get_data()); } @@ -1013,12 +1007,10 @@ mod test { let mut p = MemForest::new(); p.modify(&hashes, &[]).expect("Test mem_forests are valid"); p.modify(&[], &[hashes[0]]).expect("can remove 0"); - let mut writer = std::io::Cursor::new(Vec::new()); + let mut writer = Vec::new(); p.serialize(&mut writer).unwrap(); - let deserialized = MemForest::::deserialize(&mut std::io::Cursor::new( - writer.into_inner(), - )) - .unwrap(); + let deserialized = + MemForest::::deserialize(&mut 
Cursor::new(writer)).unwrap(); assert_eq!( deserialized.get_roots()[0].get_data(), p.get_roots()[0].get_data() diff --git a/src/accumulator/node_hash.rs b/src/accumulator/node_hash.rs index 68b9d90..f97ec55 100644 --- a/src/accumulator/node_hash.rs +++ b/src/accumulator/node_hash.rs @@ -45,11 +45,14 @@ //! .unwrap(); //! assert_eq!(parent, expected_parent); //! ``` -use std::convert::TryFrom; -use std::fmt::Debug; -use std::fmt::Display; -use std::ops::Deref; -use std::str::FromStr; + +use core::convert::TryFrom; +use core::fmt; +use core::fmt::Debug; +use core::fmt::Display; +use core::hash::Hash; +use core::ops::Deref; +use core::str::FromStr; use bitcoin_hashes::sha256; use bitcoin_hashes::sha512_256; @@ -61,20 +64,20 @@ use serde::Deserialize; #[cfg(feature = "with-serde")] use serde::Serialize; -pub trait AccumulatorHash: - Copy + Clone + Ord + Debug + Display + std::hash::Hash + Default + 'static -{ +use crate::prelude::*; + +pub trait AccumulatorHash: Copy + Clone + Ord + Debug + Display + Hash + Default + 'static { fn is_empty(&self) -> bool; fn empty() -> Self; fn is_placeholder(&self) -> bool; fn placeholder() -> Self; fn parent_hash(left: &Self, right: &Self) -> Self; - fn write(&self, writer: &mut W) -> std::io::Result<()> + fn write(&self, writer: &mut W) -> io::Result<()> where - W: std::io::Write; - fn read(reader: &mut R) -> std::io::Result + W: Write; + fn read(reader: &mut R) -> io::Result where - R: std::io::Read; + R: Read; } #[derive(Eq, PartialEq, Copy, Clone, Hash, PartialOrd, Ord)] @@ -112,7 +115,7 @@ impl Deref for BitcoinNodeHash { } impl Display for BitcoinNodeHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { if let Self::Some(ref inner) = self { let mut s = String::new(); for byte in inner.iter() { @@ -126,7 +129,7 @@ impl Display for BitcoinNodeHash { } impl Debug for BitcoinNodeHash { - fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { match self { Self::Empty => write!(f, "empty"), Self::Placeholder => write!(f, "placeholder"), @@ -278,9 +281,9 @@ impl AccumulatorHash for BitcoinNodeHash { } /// write to buffer - fn write(&self, writer: &mut W) -> std::io::Result<()> + fn write(&self, writer: &mut W) -> io::Result<()> where - W: std::io::Write, + W: Write, { match self { Self::Empty => writer.write_all(&[0]), @@ -293,9 +296,9 @@ impl AccumulatorHash for BitcoinNodeHash { } /// Read from buffer - fn read(reader: &mut R) -> std::io::Result + fn read(reader: &mut R) -> io::Result where - R: std::io::Read, + R: Read, { let mut tag = [0]; reader.read_exact(&mut tag)?; @@ -308,8 +311,8 @@ impl AccumulatorHash for BitcoinNodeHash { Ok(Self::Some(hash)) } [_] => { - let err = std::io::Error::new( - std::io::ErrorKind::InvalidData, + let err = io::Error::new( + io::ErrorKind::InvalidData, "unexpected tag for AccumulatorHash", ); Err(err) @@ -320,7 +323,8 @@ impl AccumulatorHash for BitcoinNodeHash { #[cfg(test)] mod test { - use std::str::FromStr; + use alloc::string::ToString; + use core::str::FromStr; use super::AccumulatorHash; use crate::accumulator::node_hash::BitcoinNodeHash; diff --git a/src/accumulator/pollard.rs b/src/accumulator/pollard.rs index a494a06..589c265 100644 --- a/src/accumulator/pollard.rs +++ b/src/accumulator/pollard.rs @@ -1,47 +1,50 @@ -/// Pollard is an efficient implementation of the accumulator for keeping track of a subset of the -/// whole tree. Instead of storing a proof for some leaves, it is more efficient to hold them in a -/// tree structure, and add/remove elements as needed. The main use-case for a Pollard is to keep -/// track of unconfirmed transactions' proof, in the mempool. As you get new transactions through -/// the p2p network, you check the proofs and add them to the Pollard. 
When a block is mined, we -/// can remove the confirmed transactions from the Pollard, and keep the unconfirmed ones. We can -/// also serve proofs for specific transactions as requested, allowing efficient transaction relay. -/// -/// This implementation is close to the one in `MemForest`, but it is specialized in keeping track -/// of subsets of the whole tree, allowing you to cache and uncache elements as needed. While the -/// MemForest keeps everything in the accumulator, and may take a lot of memory. -/// -/// Nodes are kept in memory, and they hold their hashes, a reference to their **aunt** (not -/// parent!), and their nieces (not children!). We do this to allow for proof generation, while -/// prunning as much as possible. In a merkle proof, we only need the sibling of the path to the -/// root, the parent is always computed on the fly as we walk up the tree. Some there's no need to -/// keep the parent. But we need the aunt (the sibling of the parent) to generate the proof. -/// -/// Every node is owned by exactly one other node, the ancestor - With the only exception being the -/// roots, which are owned by the Pollard itself. This almost garantees that we can't have a memory -/// leak, as deleting one node will delete all of its descendants. The only way to have a memory -/// leak is if we have a cycle in the tree, which we avoid by only allowing Weak references everywhere, -/// except for the owner of the node. Things are kept in a [Rc] to allow for multiple references to -/// the same node, as we may need to operate on it, and also to allow the nieces to have a reference -/// to their aunt. It could be done with pointers, but it would be more complex and error-prone. The -/// [Rc]s live inside a [RefCell], to allow for interior mutability, as we may need to change the -/// values inside a node. Make sure to avoid leaking a reference to the inner [RefCell] to the outside -/// world, as it may cause race conditions and panics. 
Every time we use a reference to the inner -/// [RefCell], we make sure to drop it as soon as possible, and that we are the only ones operating -/// on it at that time. For this reason, a [Pollard] is not [Sync], and you'll need to use a [Mutex] -/// or something similar to share it between threads. But it is [Send], as it is safe to send it to -/// another thread - everything is owned by the Pollard and lives on the heap. -/// -/// ## Usage -/// -/// //TODO: Add usage examples -use std::cell::Cell; -use std::cell::RefCell; -use std::collections::HashMap; -use std::convert::TryInto; -use std::fmt::Debug; -use std::fmt::Display; -use std::rc::Rc; -use std::rc::Weak; +//! Pollard is an efficient implementation of the accumulator for keeping track of a subset of the +//! whole tree. Instead of storing a proof for some leaves, it is more efficient to hold them in a +//! tree structure, and add/remove elements as needed. The main use-case for a Pollard is to keep +//! track of unconfirmed transactions' proof, in the mempool. As you get new transactions through +//! the p2p network, you check the proofs and add them to the Pollard. When a block is mined, we +//! can remove the confirmed transactions from the Pollard, and keep the unconfirmed ones. We can +//! also serve proofs for specific transactions as requested, allowing efficient transaction relay. +//! +//! This implementation is close to the one in `MemForest`, but it is specialized in keeping track +//! of subsets of the whole tree, allowing you to cache and uncache elements as needed. While the +//! MemForest keeps everything in the accumulator, and may take a lot of memory. +//! +//! Nodes are kept in memory, and they hold their hashes, a reference to their **aunt** (not +//! parent!), and their nieces (not children!). We do this to allow for proof generation, while +//! pruning as much as possible. In a merkle proof, we only need the sibling of the path to the +//!
root, the parent is always computed on the fly as we walk up the tree. So there's no need to +//! keep the parent. But we need the aunt (the sibling of the parent) to generate the proof. +//! +//! Every node is owned by exactly one other node, the ancestor - With the only exception being the +//! roots, which are owned by the Pollard itself. This almost guarantees that we can't have a memory +//! leak, as deleting one node will delete all of its descendants. The only way to have a memory +//! leak is if we have a cycle in the tree, which we avoid by only allowing Weak references everywhere, +//! except for the owner of the node. Things are kept in a [Rc] to allow for multiple references to +//! the same node, as we may need to operate on it, and also to allow the nieces to have a reference +//! to their aunt. It could be done with pointers, but it would be more complex and error-prone. The +//! [Rc]s live inside a [RefCell], to allow for interior mutability, as we may need to change the +//! values inside a node. Make sure to avoid leaking a reference to the inner [RefCell] to the outside +//! world, as it may cause race conditions and panics. Every time we use a reference to the inner +//! [RefCell], we make sure to drop it as soon as possible, and that we are the only ones operating +//! on it at that time. For this reason, a [Pollard] is not [Sync], and you'll need to use a `Mutex` +//! or something similar to share it between threads. But it is [Send], as it is safe to send it to +//! another thread - everything is owned by the Pollard and lives on the heap. +//! +//! ## Usage +//! +//!
//TODO: Add usage examples + +use alloc::rc::Rc; +use alloc::rc::Weak; +use core::array; +use core::cell::Cell; +use core::cell::RefCell; +use core::convert::TryInto; +use core::fmt; +use core::fmt::Debug; +use core::fmt::Display; +use core::mem; use super::node_hash::AccumulatorHash; use super::proof::Proof; @@ -57,6 +60,7 @@ use super::util::parent; use super::util::right_child; use super::util::root_position; use super::util::tree_rows; +use crate::prelude::*; #[derive(Default, Clone)] /// A node in the Pollard tree @@ -125,7 +129,7 @@ pub enum PollardError { InvalidProof, /// We've had some I/O error while serializing or deserializing the forest - IO(std::io::Error), + IO(io::Error), /// We couldn't upgrade a node to a root /// @@ -163,8 +167,8 @@ pub enum PollardError { RootNotFound, } -impl From for PollardError { - fn from(err: std::io::Error) -> Self { +impl From for PollardError { + fn from(err: io::Error) -> Self { Self::IO(err) } } @@ -177,7 +181,7 @@ impl PartialEq for PollardError { } impl Debug for PollardError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::NodeNotFound(hash) => write!(f, "Node not found: {hash}"), Self::PositionNotFound(pos) => write!(f, "Position not found: {pos}"), @@ -197,14 +201,14 @@ impl Debug for PollardError { } } -impl std::fmt::Display for PollardError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for PollardError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self:?}") } } impl Debug for PollardNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.hash().to_string()) } } @@ -230,7 +234,7 @@ impl PollardNode { }) } - fn serialize(&self, writer: &mut W) -> Result<(), PollardError> { + fn serialize(&self, writer: &mut W) -> Result<(), PollardError> 
{ let is_leaf = self.left_niece().is_none() as u8; writer.write_all(&is_leaf.to_be_bytes())?; @@ -250,7 +254,7 @@ impl PollardNode { Ok(()) } - fn deserialize( + fn deserialize( reader: &mut R, ancestor: Option>, leaf_map: &mut HashMap>, @@ -493,11 +497,11 @@ impl PollardNode { /// node's children. This function swaps the nieces of this node with the nieces of the provided /// node. fn swap_nieces(&self, other: &Self) { - std::mem::swap( + mem::swap( &mut *self.left_niece.borrow_mut(), &mut *other.left_niece.borrow_mut(), ); - std::mem::swap( + mem::swap( &mut *self.right_niece.borrow_mut(), &mut *other.right_niece.borrow_mut(), ); @@ -568,13 +572,13 @@ impl PartialEq for Pollard { impl Eq for Pollard {} impl Debug for Pollard { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.string()) } } impl Display for Pollard { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.string()) } } @@ -761,7 +765,7 @@ impl Pollard { /// Creates a new empty [Pollard] pub fn new() -> Self { - let roots: [Option>>; 64] = std::array::from_fn(|_| None); + let roots: [Option>>; 64] = array::from_fn(|_| None); Self { roots, leaves: 0, @@ -799,12 +803,12 @@ impl Pollard { /// Serializes the [Pollard] into a sync /// - /// This function serializes the [Pollard] into a sync that implements [std::io::Write]. This will be + /// This function serializes the [Pollard] into a sync that implements [Write]. This will be /// serialized in a compact binary format, so it can be stored in a file or sent over the /// network. This function will return an error if it fails to write to the sync. /// /// To deserialize the [Pollard] back, you can use the `deserialize` function. 
- pub fn serialize(&self, writer: &mut W) -> Result<(), PollardError> { + pub fn serialize(&self, writer: &mut W) -> Result<(), PollardError> { writer.write_all(&self.leaves.to_be_bytes())?; for root in self.roots.iter() { @@ -826,9 +830,9 @@ impl Pollard { /// Deserializes a [Pollard] from a stream /// - /// This function deserializes a [Pollard] from a stream that implements [std::io::Read]. This stream + /// This function deserializes a [Pollard] from a stream that implements [Read]. This stream /// should contain a [Pollard] serialized with the `serialize` function. - pub fn deserialize(reader: &mut R) -> Result> { + pub fn deserialize(reader: &mut R) -> Result> { let mut leaves = [0u8; 8]; reader.read_exact(&mut leaves)?; let leaves = u64::from_be_bytes(leaves); @@ -1095,7 +1099,7 @@ impl Pollard { let mut roots_to_destroy = Vec::new(); while self.leaves >> row & 1 == 1 { - let old_root = std::mem::take(&mut self.roots[row as usize]).expect("Root not found"); + let old_root = mem::take(&mut self.roots[row as usize]).expect("Root not found"); let pos = root_position(self.leaves(), row, tree_rows(self.leaves())); add_positions.push((pos, old_root.hash())); @@ -1264,7 +1268,7 @@ impl From> for Pollard { #[cfg(test)] mod tests { - use std::str::FromStr; + use core::str::FromStr; use serde::Deserialize; @@ -1435,11 +1439,10 @@ mod tests { deletion_tests: Vec, } - let contents = std::fs::read_to_string("test_values/test_cases.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/test_cases.json"); - let tests = serde_json::from_str::(contents.as_str()) - .expect("JSON deserialization error"); + let tests = + serde_json::from_str::(contents).expect("JSON deserialization error"); for i in tests.insertion_tests { run_single_addition_case(i); diff --git a/src/accumulator/proof.rs b/src/accumulator/proof.rs index b586fcc..1aa3393 100644 --- a/src/accumulator/proof.rs +++ b/src/accumulator/proof.rs @@ -54,9 +54,10 @@ 
//! // Verify the proof //! assert!(s.verify(&p, &vec![hashes[0]]).expect("This proof is valid")); //! ``` -use std::collections::HashMap; -use std::io::Read; -use std::io::Write; + +use alloc::vec::IntoIter; +use core::fmt::Debug; +use core::iter::Peekable; #[cfg(feature = "with-serde")] use serde::Deserialize; @@ -70,6 +71,7 @@ use super::util; use super::util::get_proof_positions; use super::util::read_u64; use super::util::tree_rows; +use crate::prelude::*; #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "with-serde", derive(Serialize, Deserialize))] @@ -274,7 +276,7 @@ impl Proof { return Ok(true); } - let mut calculated_roots: std::iter::Peekable> = self + let mut calculated_roots: Peekable> = self .calculate_hashes(del_hashes, num_leaves)? .1 .into_iter() @@ -368,7 +370,7 @@ impl Proof { /// serialized_proof /// ); /// ``` - pub fn serialize(&self, mut writer: W) -> std::io::Result { + pub fn serialize(&self, mut writer: W) -> io::Result { let mut len = 16; writer.write_all(&(self.targets.len() as u64).to_le_bytes())?; for target in &self.targets { @@ -386,11 +388,10 @@ impl Proof { /// Deserializes a proof from a byte array. 
/// # Example /// ``` - /// use std::io::Cursor; - /// /// use rustreexo::accumulator::node_hash::BitcoinNodeHash; /// use rustreexo::accumulator::proof::Proof; /// use rustreexo::accumulator::stump::Stump; + /// use rustreexo::prelude::io::Cursor; /// let proof = Cursor::new(vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); /// let deserialized_proof = Proof::::deserialize(proof).unwrap(); /// // An empty proof is only 16 bytes of zeros, meaning no targets and no hashes @@ -888,11 +889,11 @@ impl Proof { #[cfg(test)] mod tests { - use std::str::FromStr; + use core::str::FromStr; use serde::Deserialize; - use super::Proof; + use super::*; use crate::accumulator::node_hash::AccumulatorHash; use crate::accumulator::node_hash::BitcoinNodeHash; use crate::accumulator::stump::Stump; @@ -954,11 +955,10 @@ mod tests { /// After we update the proof, the cached hashes should be this expected_cached_hashes: Vec, } - let contents = std::fs::read_to_string("test_values/cached_proof_tests.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/cached_proof_tests.json"); let values: Vec = - serde_json::from_str(contents.as_str()).expect("JSON deserialization error"); + serde_json::from_str(contents).expect("JSON deserialization error"); for case_values in values { let proof_hashes = case_values .cached_proof @@ -1473,11 +1473,10 @@ mod tests { #[test] fn test_proof_verify() { - let contents = std::fs::read_to_string("test_values/test_cases.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/test_cases.json"); let values: serde_json::Value = - serde_json::from_str(contents.as_str()).expect("JSON deserialization error"); + serde_json::from_str(contents).expect("JSON deserialization error"); let tests = values["proof_tests"].as_array().unwrap(); for test in tests { run_single_case(test); diff --git a/src/accumulator/stump.rs b/src/accumulator/stump.rs index 
c337bb4..da6603d 100644 --- a/src/accumulator/stump.rs +++ b/src/accumulator/stump.rs @@ -26,10 +26,7 @@ //! assert_eq!(s.unwrap().0.roots, utxos); //! ``` -use std::collections::BTreeSet; -use std::io::Read; -use std::io::Write; -use std::vec; +use alloc::collections::BTreeSet; #[cfg(feature = "with-serde")] use serde::Deserialize; @@ -41,6 +38,7 @@ use super::node_hash::BitcoinNodeHash; use super::proof::NodesAndRootsOldNew; use super::proof::Proof; use super::util; +use crate::prelude::*; #[derive(Debug, Clone, Default)] pub struct UpdateData { @@ -63,15 +61,15 @@ pub enum StumpError { /// An IO error occurred, this is usually due to a failure in reading or writing /// the Stump to a reader/writer. This error will be returned during (de)serialization. - Io(std::io::ErrorKind), + Io(io::ErrorKind), /// The provided proof is invalid. This will happen during proof verification and stump /// modification. InvalidProof(String), } -impl From for StumpError { - fn from(err: std::io::Error) -> Self { +impl From for StumpError { + fn from(err: io::Error) -> Self { Self::Io(err.kind()) } } @@ -259,12 +257,13 @@ impl Stump { /// use rustreexo::accumulator::node_hash::BitcoinNodeHash; /// use rustreexo::accumulator::proof::Proof; /// use rustreexo::accumulator::stump::Stump; + /// use rustreexo::prelude::io::Cursor; /// let buffer = vec![ /// 8, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 150, 124, 244, 241, 98, 69, 217, 222, /// 235, 97, 61, 137, 135, 76, 197, 134, 232, 173, 253, 8, 28, 17, 124, 123, 16, 4, 66, 30, 63, /// 113, 246, 74, /// ]; - /// let mut buffer = std::io::Cursor::new(buffer); + /// let mut buffer = Cursor::new(buffer); /// let hashes = [0, 1, 2, 3, 4, 5, 6, 7] /// .iter() /// .map(|&el| BitcoinNodeHash::from([el; 32])) @@ -368,15 +367,15 @@ impl Stump { #[cfg(test)] mod test { - use std::fmt::Display; - use std::io::Read; - use std::io::Write; - use std::str::FromStr; - use std::vec; + use core::fmt; + use core::fmt::Display; + use 
core::str::FromStr; + use io::Cursor; use serde::Deserialize; use super::Stump; + use super::*; use crate::accumulator::node_hash::AccumulatorHash; use crate::accumulator::node_hash::BitcoinNodeHash; use crate::accumulator::proof::Proof; @@ -404,7 +403,7 @@ mod test { struct CustomHash([u8; 32]); impl Display for CustomHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.0) } } @@ -424,7 +423,7 @@ mod test { } Self(hash) } - fn read(reader: &mut R) -> std::io::Result { + fn read(reader: &mut R) -> io::Result { let mut hash = [0; 32]; reader .read_exact(&mut hash) @@ -432,7 +431,7 @@ mod test { .unwrap(); Ok(Self(hash)) } - fn write(&self, writer: &mut W) -> std::io::Result<()> { + fn write(&self, writer: &mut W) -> io::Result<()> { writer .write_all(&self.0) .map_err(|e| e.to_string()) @@ -492,11 +491,10 @@ mod test { new_del_hashes: Vec, to_destroy: Vec, } - let contents = std::fs::read_to_string("test_values/update_data_tests.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/update_data_tests.json"); - let tests = serde_json::from_str::>(contents.as_str()) - .expect("JSON deserialization error"); + let tests = + serde_json::from_str::>(contents).expect("JSON deserialization error"); for data in tests { let roots = data @@ -706,7 +704,7 @@ mod test { let mut writer = Vec::new(); stump.serialize(&mut writer).unwrap(); - let mut reader = std::io::Cursor::new(writer); + let mut reader = Cursor::new(writer); let stump2 = Stump::deserialize(&mut reader).unwrap(); assert_eq!(stump, stump2); } @@ -719,11 +717,10 @@ mod test { deletion_tests: Vec, } - let contents = std::fs::read_to_string("test_values/test_cases.json") - .expect("Something went wrong reading the file"); + let contents = include_str!("../../test_values/test_cases.json"); - let tests = serde_json::from_str::(contents.as_str()) - .expect("JSON 
deserialization error"); + let tests = + serde_json::from_str::(contents).expect("JSON deserialization error"); for i in tests.insertion_tests { run_single_addition_case(i); diff --git a/src/accumulator/util.rs b/src/accumulator/util.rs index 71080ac..3b342be 100644 --- a/src/accumulator/util.rs +++ b/src/accumulator/util.rs @@ -1,9 +1,8 @@ -use std::collections::BTreeSet; -use std::collections::HashSet; -use std::io::Read; +use alloc::collections::BTreeSet; // Rustreexo use super::node_hash::AccumulatorHash; +use crate::prelude::*; // isRootPosition checks if the current position is a root given the number of // leaves and the entire rows of the forest. @@ -223,7 +222,7 @@ pub fn parent(pos: u64, forest_rows: u8) -> u64 { (pos >> 1) | (1 << forest_rows) } -pub fn read_u64(buf: &mut Source) -> Result { +pub fn read_u64(buf: &mut Source) -> Result { let mut bytes = [0u8; 8]; buf.read_exact(&mut bytes)?; Ok(u64::from_le_bytes(bytes)) @@ -351,8 +350,9 @@ pub fn hash_from_u8(value: u8) -> super::node_hash::BitcoinNodeHash { #[cfg(test)] mod tests { - use std::str::FromStr; - use std::vec; + use alloc::vec; + use alloc::vec::Vec; + use core::str::FromStr; use super::roots_to_destroy; use crate::accumulator::node_hash::BitcoinNodeHash; diff --git a/src/lib.rs b/src/lib.rs index d369fcd..4597242 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,4 +14,36 @@ //! For more information, check each module's documentation. #![cfg_attr(any(bench), feature(test))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +#[cfg(not(feature = "std"))] +/// Re-exports `alloc`/`std` basics plus HashMap/HashSet and IO traits. 
+pub mod prelude { + pub use alloc::borrow::ToOwned; + pub use alloc::format; + pub use alloc::string::String; + pub use alloc::string::ToString; + pub use alloc::vec; // brings `vec!` into scope + pub use alloc::vec::Vec; + + pub use bitcoin_io as io; + pub use bitcoin_io::Read; + pub use bitcoin_io::Write; + pub use hashbrown::HashMap; + pub use hashbrown::HashSet; +} + +#[cfg(feature = "std")] +/// Re-exports `alloc`/`std` basics plus HashMap/HashSet and IO traits. +pub mod prelude { + extern crate std; + pub use std::collections::HashMap; + pub use std::collections::HashSet; + pub use std::io; + pub use std::io::Read; + pub use std::io::Write; +} + pub mod accumulator;