diff --git a/Cargo.toml b/Cargo.toml index 84d729f..02bfa30 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,9 +9,14 @@ members = [ "crates/accumulator", "crates/vlc", "crates/cops", + "crates/vrf", + "crates/crypto", + "crates/enclaves", + "crates/types", "demos/test_conflict", - "demos/coll-tx", - "demos/vlc-dag" + "demos/coll_tx", + "demos/vlc_dag", + "demos/tee_vlc", ] [profile.release] @@ -37,4 +42,3 @@ tokio-util = "0.7.10" [lib] name = "chronos" path = "src/lib.rs" - diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..ae419d5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 hetu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/README.md b/crates/README.md index 096215e..e9584bf 100644 --- a/crates/README.md +++ b/crates/README.md @@ -20,3 +20,16 @@ The crates folder of Chronos includes core functional code crates and utility li - The data store maintains a set of key-value pairs. 
- It provides causal consistency to clients. +## [enclaves](./enclaves/) + +- This module provides some common utilities of TEE (Trusted Execution Environment) Enclaves. +- For example: AWS nitro enclave, Microsoft Azure, Intel SGX, etc. + +## [crypto](./crypto/) + +- Some common crypto utilities, signatures, verify, and hash functions for elliptic curve. + +## [vrf](./vrf/) + +- This module contains implementations of a [verifiable random function](https://en.wikipedia.org/wiki/Verifiable_random_function), currently only ECVRF. +- VRFs can be used in the consensus protocol for leader election. \ No newline at end of file diff --git a/crates/crypto/Cargo.toml b/crates/crypto/Cargo.toml new file mode 100644 index 0000000..a1f188a --- /dev/null +++ b/crates/crypto/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "crypto" +version = "0.1.0" +edition = "2021" + +[dependencies] +derive_more = "0.99.17" +derive-where = "1.2.7" +bincode = "1.3.3" +serde = { version = "1.0.195", features = ["derive"] } +schnorrkel = { version = "0.11.4", features = ["serde"] } +secp256k1 = { version = "0.29.0", features = ["rand-std", "serde", "recovery"] } +nix = { version = "0.28.0", features = ["socket", "sched", "resource"] } +serde_json = "1.0.114" +sha2 = "0.10.8" +sha3 = "0.10.1" +hex = "0.4.3" +blake2 = "0.10.6" +rand = "0.8.5" +primitive-types = { version = "0.12.2", features = ["serde"] } +anyhow = { version = "1.0.79", features = ["backtrace"] } \ No newline at end of file diff --git a/crates/crypto/src/core.rs b/crates/crypto/src/core.rs new file mode 100644 index 0000000..8382130 --- /dev/null +++ b/crates/crypto/src/core.rs @@ -0,0 +1,536 @@ +use std::hash::{Hash, Hasher}; + +use blake2::Blake2b; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use rand::{rngs::OsRng, Rng}; + + +// Hash-based digest deriving solution +// There's no well known solution for deriving digest methods for general +// structural data i.e. 
structs and enums (as far as I know), which means to +compute digest for a structural data e.g. message type, one has to do either: +specify the traversal manually +derive `Hash` and make use of it +derive `Serialize` and make use of it +derive `BorshSerialize`, which is similar to `Serialize` but has been +claimed to be specially designed for this use case +currently the second approach is taken. the benefit is `Hash` semantic +guarantees the desired reproducibility, and the main problem is the lack of +cross-platform compatibility, which is hardly a concern in this codebase +since it is written for benchmarks performed on unified systems and machines. +nevertheless, I manually addressed the endianness problem below + +pub trait DigestHasher { + fn write(&mut self, bytes: &[u8]); +} + +impl DigestHasher for Sha256 { + fn write(&mut self, bytes: &[u8]) { + self.update(bytes) + } +} + +impl DigestHasher for Blake2b { + fn write(&mut self, bytes: &[u8]) { + self.update(bytes) + } +} + +impl DigestHasher for Vec { + fn write(&mut self, bytes: &[u8]) { + self.extend(bytes.iter().cloned()) + } +} + +struct ImplHasher<'a, T>(&'a mut T); + +impl Hasher for ImplHasher<'_, T> { + fn write(&mut self, bytes: &[u8]) { + self.0.write(bytes) + } + + fn write_u16(&mut self, i: u16) { + self.0.write(&i.to_le_bytes()) + } + + fn write_u32(&mut self, i: u32) { + self.0.write(&i.to_le_bytes()) + } + + fn write_u64(&mut self, i: u64) { + self.0.write(&i.to_le_bytes()) + } + + fn write_usize(&mut self, i: usize) { + self.0.write(&i.to_le_bytes()) + } + + fn write_i16(&mut self, i: i16) { + self.0.write(&i.to_le_bytes()) + } + + fn write_i32(&mut self, i: i32) { + self.0.write(&i.to_le_bytes()) + } + + fn write_i64(&mut self, i: i64) { + self.0.write(&i.to_le_bytes()) + } + + fn write_isize(&mut self, i: isize) { + self.0.write(&i.to_le_bytes()) + } + + fn finish(&self) -> u64 { + unimplemented!() + } +} + +pub trait DigestHash: Hash { + fn hash(&self, 
state: &mut impl DigestHasher) { + Hash::hash(self, &mut ImplHasher(state)) + } + + fn sha256(&self) -> H256 { + let mut state = Sha256::new(); + DigestHash::hash(self, &mut state); + H256(state.finalize().into()) + } + + fn blake2(&self) -> H256 { + let mut state = Blake2b::::new(); + DigestHash::hash(self, &mut state); + H256(state.finalize().into()) + } +} +impl DigestHash for T {} + +pub use primitive_types::H256; + +#[derive( + Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, derive_more::Deref, +)] +pub struct Verifiable { + #[deref] + inner: M, + signature: S, +} + +impl Verifiable { + pub fn into_inner(self) -> M { + self.inner + } +} + +pub mod events { + #[derive(Debug, Clone)] + pub struct Signed(pub super::Verifiable); + + #[derive(Debug, Clone)] + pub struct Verified(pub super::Verifiable); +} + +// the cryptographic library must support seedable RNG based keypair generation +// to be used in this codebase +// it would be better if the library supports prehashed message as well, but a +// fallback `impl DigestHasher for Vec` is provided above anyway + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum Signature { + Plain(String), // for testing + Secp256k1(secp256k1::ecdsa::Signature), + Schnorrkel(peer::Signature), +} + +#[derive(Debug, Clone)] +pub struct Crypto { + provider: CryptoProvider, + public_keys: Vec, +} + +#[derive(Debug, Clone)] +enum CryptoProvider { + Insecure(String), // the "signature" + Secp256k1(Secp256k1Crypto), + Schnorrkel(Box), +} + +#[derive(Debug, Clone)] +struct Secp256k1Crypto { + secret_key: secp256k1::SecretKey, + secp: secp256k1::Secp256k1, +} + +#[derive(Debug, Clone)] +enum PublicKey { + Plain(String), + Secp256k1(secp256k1::PublicKey), + Schnorrkel(peer::PublicKey), +} + +#[derive(Debug, Clone, Copy)] +pub enum CryptoFlavor { + Plain, + Secp256k1, + Schnorrkel, +} + +impl Crypto { + pub fn new_hardcoded( + n: usize, + index: impl Into, + flavor: 
CryptoFlavor, + ) -> anyhow::Result { + let secret_keys = (0..n).map(|id| { + let mut k = [0; 32]; + let k1 = format!("replica-{id}"); + k[..k1.as_bytes().len()].copy_from_slice(k1.as_bytes()); + k + }); + let crypto = match flavor { + CryptoFlavor::Plain => Self { + public_keys: (0..n) + .map(|i| PublicKey::Plain(format!("replica-{i:03}"))) + .collect(), + provider: CryptoProvider::Insecure(format!("replica-{:03}", index.into())), + }, + CryptoFlavor::Secp256k1 => { + let secret_keys = secret_keys + .map(|k| secp256k1::SecretKey::from_slice(&k)) + .collect::, _>>()?; + let secp = secp256k1::Secp256k1::new(); + Self { + public_keys: secret_keys + .iter() + .map(|secret_key| PublicKey::Secp256k1(secret_key.public_key(&secp))) + .collect(), + provider: CryptoProvider::Secp256k1(Secp256k1Crypto { + secret_key: secret_keys[index.into()], + secp, + }), + } + } + CryptoFlavor::Schnorrkel => { + let mut secret_keys = secret_keys + .map(|k| { + Ok(schnorrkel::MiniSecretKey::from_bytes(&k)? + .expand_to_keypair(schnorrkel::ExpansionMode::Uniform)) + }) + .collect::, _>>() + .map_err(anyhow::Error::msg::)?; + Self { + public_keys: secret_keys + .iter() + .map(|keypair| PublicKey::Schnorrkel(keypair.public)) + .collect(), + provider: CryptoProvider::Schnorrkel(Box::new(peer::Crypto { + keypair: secret_keys.remove(index.into()), + context: schnorrkel::signing_context(b"default"), + })), + } + } + }; + Ok(crypto) + } + + pub fn new_random( + flavor: CryptoFlavor, + ) -> anyhow::Result { + let crypto = match flavor { + CryptoFlavor::Plain => { + let mut rng = rand::thread_rng(); + let secret_key = (0..32).map(|_| rng.gen()).collect::>(); + let public_key = secret_key.iter().map(|byte| format!("{:02x}", byte)).collect(); + Self { + public_keys: vec![PublicKey::Plain(public_key)], + provider: CryptoProvider::Insecure(format!("{:?}", secret_key)), + } + }, + CryptoFlavor::Secp256k1 => { + let secp = secp256k1::Secp256k1::new(); + let (secret_key, public_key) = 
secp.generate_keypair(&mut OsRng); + Self { + public_keys: vec![PublicKey::Secp256k1(public_key)], + provider: CryptoProvider::Secp256k1(Secp256k1Crypto { + secret_key, + secp, + }), + } + }, + CryptoFlavor::Schnorrkel => { + let mini_secret_key = schnorrkel::MiniSecretKey::generate_with(&mut OsRng); + let keypair = mini_secret_key.expand_to_keypair(schnorrkel::ExpansionMode::Uniform); + Self { + public_keys: vec![PublicKey::Schnorrkel(keypair.public)], + provider: CryptoProvider::Schnorrkel(Box::new(peer::Crypto { + keypair, + context: schnorrkel::signing_context(b"default"), + })), + } + } + }; + Ok(crypto) + } + + pub fn to_hex(&self) -> Option<(String, String)> { + match &self.provider { + CryptoProvider::Secp256k1(secp256k1_crypto) => { + let secret_key_hex = hex::encode(secp256k1_crypto.secret_key.secret_bytes()); + let public_key_hex = hex::encode(secp256k1_crypto.secret_key.public_key(&secp256k1_crypto.secp).serialize()); + Some((secret_key_hex, public_key_hex)) + }, + _ => None, + } + } + + pub fn sign(&self, message: M) -> Verifiable { + match &self.provider { + CryptoProvider::Insecure(signature) => Verifiable { + inner: message, + signature: Signature::Plain(signature.clone()), + }, + CryptoProvider::Secp256k1(crypto) => { + let digest = secp256k1::Message::from_digest(message.sha256().into()); + Verifiable { + inner: message, + signature: Signature::Secp256k1( + crypto.secp.sign_ecdsa(&digest, &crypto.secret_key), + ), + } + } + CryptoProvider::Schnorrkel(crypto) => { + let signed = crypto.sign(message); + // this feels monkey patch = = + Verifiable { + inner: signed.inner, + signature: Signature::Schnorrkel(signed.signature), + } + } + } + } + + pub fn verify( + &self, + index: impl Into, + signed: &Verifiable, + ) -> anyhow::Result<()> { + let Some(public_key) = self.public_keys.get(index.into()) else { + anyhow::bail!("no identifier for index") + }; + match (&self.provider, public_key, &signed.signature) { + ( + CryptoProvider::Insecure(_), + 
PublicKey::Plain(expected_signature), + Signature::Plain(signature), + ) => anyhow::ensure!(signature == expected_signature), + + ( + CryptoProvider::Secp256k1(crypto), + PublicKey::Secp256k1(public_key), + Signature::Secp256k1(signature), + ) => { + let digest = secp256k1::Message::from_digest(signed.inner.sha256().into()); + crypto.secp.verify_ecdsa(&digest, signature, public_key)? + } + // this feels even more monkey patch > < + ( + CryptoProvider::Schnorrkel(crypto), + PublicKey::Schnorrkel(public_key), + Signature::Schnorrkel(signature), + ) => crypto.verify_internal(public_key, &signed.inner, signature)?, + _ => anyhow::bail!("unimplemented"), + } + Ok(()) + } + + // TODO deduplicate with the `peer::Crypto` version + pub fn verify_batch, M: DigestHash>( + &self, + indexes: &[I], + signed: &[Verifiable], + ) -> anyhow::Result<()> { + let CryptoProvider::Schnorrkel(crypto) = &self.provider else { + anyhow::bail!("unimplemented") // TODO fallback to verify one by one? + }; + let mut transcripts = Vec::new(); + let mut signatures = Vec::new(); + let mut public_keys = Vec::new(); + for (index, verifiable) in indexes.iter().zip(signed) { + let ( + PublicKey::Schnorrkel(public_key), + Signature::Schnorrkel(peer::Signature(signature)), + ) = ( + &self.public_keys[index.clone().into()], + &verifiable.signature, + ) + else { + anyhow::bail!("unimplemented") + }; + let mut state = Sha256::new(); + DigestHash::hash(&verifiable.inner, &mut state); + transcripts.push(crypto.context.hash256(state)); + signatures.push(*signature); + public_keys.push(*public_key) + } + schnorrkel::verify_batch(transcripts, &signatures, &public_keys, true) + .map_err(anyhow::Error::msg) + } +} + +pub mod peer { + use std::{fmt::Debug, hash::Hash}; + + use derive_where::derive_where; + use rand::{CryptoRng, RngCore}; + use schnorrkel::{context::SigningContext, Keypair}; + use serde::{Deserialize, Serialize}; + use sha2::{Digest, Sha256}; + + use super::DigestHash; + + #[derive(Debug, Clone, 
Copy, PartialEq, Eq, Serialize, Deserialize)] + pub struct Signature(pub schnorrkel::Signature); + + impl Ord for Signature { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.to_bytes().cmp(&other.0.to_bytes()) + } + } + + impl PartialOrd for Signature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Hash for Signature { + fn hash(&self, state: &mut H) { + Hash::hash(&self.0.to_bytes(), state) + } + } + + pub type Verifiable = super::Verifiable; + + pub type PublicKey = schnorrkel::PublicKey; + + pub mod events { + #[derive(Debug, Clone)] + pub struct Signed(pub super::Verifiable); + + #[derive(Debug, Clone)] + pub struct Verified(pub super::Verifiable); + } + + #[derive(Clone)] + #[derive_where(Debug)] + pub struct Crypto { + pub keypair: Keypair, + #[derive_where(skip)] + pub context: SigningContext, + } + + impl Crypto { + pub fn new_random(rng: &mut (impl CryptoRng + RngCore)) -> Self { + Self { + keypair: Keypair::generate_with(rng), + context: SigningContext::new(b"default"), + } + } + + pub fn public_key(&self) -> PublicKey { + self.keypair.public + } + + pub fn sign(&self, message: M) -> Verifiable { + let mut state = Sha256::new(); + DigestHash::hash(&message, &mut state); + let signature = self.keypair.sign(self.context.hash256(state)); + Verifiable { + inner: message, + signature: Signature(signature), + } + } + + pub fn verify( + &self, + public_key: &PublicKey, + signed: &Verifiable, + ) -> anyhow::Result<()> { + self.verify_internal(public_key, &signed.inner, &signed.signature) + } + + pub fn verify_internal( + &self, + public_key: &PublicKey, + message: &M, + Signature(signature): &Signature, + ) -> anyhow::Result<()> { + let mut state = Sha256::new(); + DigestHash::hash(message, &mut state); + public_key + .verify(self.context.hash256(state), signature) + .map_err(anyhow::Error::msg) + } + + pub fn verify_batch( + &self, + public_keys: &[PublicKey], + signed: &[Verifiable], + ) -> 
anyhow::Result<()> { + let mut transcripts = Vec::new(); + let mut signatures = Vec::new(); + for verifiable in signed { + let mut state = Sha256::new(); + DigestHash::hash(&verifiable.inner, &mut state); + transcripts.push(self.context.hash256(state)); + signatures.push(verifiable.signature.0); + } + schnorrkel::verify_batch(transcripts, &signatures, public_keys, true) + .map_err(anyhow::Error::msg) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn struct_digest() { + #[derive(Hash)] + struct Foo { + a: u32, + bs: Vec, + } + let foo = Foo { + a: 42, + bs: b"hello".to_vec(), + }; + assert_ne!(foo.sha256(), Default::default()); + } + + #[test] + fn generate_keypair() { + let crypto = Crypto::new_random(CryptoFlavor::Secp256k1); + assert!(crypto.is_ok()); + let crypto = crypto.unwrap(); + println!("{:?}", crypto); + let keys = crypto.to_hex(); + assert!(keys.is_some()); + println!("{:?}", keys.unwrap()); + } + + #[test] + fn verify_batched() -> anyhow::Result<()> { + let message = "hello"; + let crypto = (0..4usize) + .map(|i| Crypto::new_hardcoded(4, i, CryptoFlavor::Schnorrkel)) + .collect::>>()?; + let verifiable = crypto + .iter() + .map(|crypto| crypto.sign(message)) + .collect::>(); + crypto[0].verify_batch(&[0usize, 1, 2, 3], &verifiable) + } +} \ No newline at end of file diff --git a/crates/crypto/src/lib.rs b/crates/crypto/src/lib.rs new file mode 100644 index 0000000..4ed1c9a --- /dev/null +++ b/crates/crypto/src/lib.rs @@ -0,0 +1,2 @@ +pub mod core; +pub mod recovery; \ No newline at end of file diff --git a/crates/crypto/src/recovery.rs b/crates/crypto/src/recovery.rs new file mode 100644 index 0000000..a19a152 --- /dev/null +++ b/crates/crypto/src/recovery.rs @@ -0,0 +1,127 @@ +use anyhow::Ok; +use hex::FromHex; +use rand::rngs::OsRng; +use secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; +use sha3::{Digest, Keccak256}; + +pub fn public_key_to_address(public_key_hex: &str) -> anyhow::Result { + let public_key_bytes = 
hex::decode(public_key_hex)?; + + let mut hasher = Keccak256::new(); + hasher.update(&public_key_bytes[1..]); + let binding = hasher.finalize(); + let hash_bytes = binding.as_slice(); + + let address_bytes = &hash_bytes[hash_bytes.len() - 20..]; + + let mut address = "0x".to_owned(); + address.push_str(&hex::encode(address_bytes)); + + Ok(address) +} + +pub fn gen_secp256k1_keypair() -> (String, String) { + let secp = secp256k1::Secp256k1::new(); + let (secret_key, public_key) = secp.generate_keypair(&mut OsRng); + let secret_key_hex = hex::encode(secret_key.secret_bytes()); + let public_key_hex = hex::encode(public_key.serialize()); + (secret_key_hex, public_key_hex) +} + +pub fn sign_message_recover_pk( + secp: &secp256k1::Secp256k1, + secret_key: &secp256k1::SecretKey, + message: &[u8], +) -> anyhow::Result { + let message = secp256k1::Message::from_digest_slice(message)?; + Ok(secp.sign_ecdsa_recoverable(&message, &secret_key)) +} + +pub fn recover_public_key( + secp: &secp256k1::Secp256k1, + signature: &RecoverableSignature, + message: &[u8], +) -> anyhow::Result { + let message = secp256k1::Message::from_digest_slice(message)?; + let pub_key = secp.recover_ecdsa(&message, &signature)?; + Ok(pub_key) +} + +pub fn verify_secp256k1_recovery_pk( + signature_hex: &str, + message_hex: &str, +) -> anyhow::Result<(), anyhow::Error> { + let signature_bytes = Vec::from_hex(signature_hex)?; + let message_bytes = Vec::from_hex(message_hex)?; + + let secp = secp256k1::Secp256k1::new(); + + let recovery_id = RecoveryId::from_i32(i32::from(signature_bytes[64]))?; + let signatures_no_id = &signature_bytes[0..64]; + + let recoverable_signature = RecoverableSignature::from_compact(signatures_no_id, recovery_id)?; + let message = secp256k1::Message::from_digest_slice(&message_bytes)?; + let public_key = secp.recover_ecdsa(&message, &recoverable_signature)?; + + let signature = secp256k1::ecdsa::Signature::from_compact(signatures_no_id)?; + secp.verify_ecdsa(&message, 
&signature, &public_key)?; + Ok(()) +} + +pub fn verify_secp256k1_recovery_pk_bytes( + signature_bytes: Vec, + message_bytes: [u8; 32], +) -> anyhow::Result { + + let secp = secp256k1::Secp256k1::new(); + + let recovery_id = RecoveryId::from_i32(i32::from(signature_bytes[64]))?; + let signatures_no_id = &signature_bytes[0..64]; + + let recoverable_signature = RecoverableSignature::from_compact(signatures_no_id, recovery_id)?; + let message = secp256k1::Message::from_digest_slice(&message_bytes)?; + let pub_key = secp.recover_ecdsa(&message, &recoverable_signature)?; + Ok(pub_key) +} + +#[cfg(test)] +mod tests { + use crate::core::DigestHash; + use rand::rngs::OsRng; + + use super::*; + + #[test] + fn sign_recover_verify() { + use DigestHash as _; + + let secp = secp256k1::Secp256k1::new(); + let (secret_key, public_key) = secp.generate_keypair(&mut OsRng); + let secret_key_hex = hex::encode(secret_key.secret_bytes()); + let public_key_hex = hex::encode(public_key.serialize()); + println!("pri :{} \npub : {}", secret_key_hex, public_key_hex); + + let message = "Hello, Ethereum!".to_owned(); + let msg = message.sha256().to_fixed_bytes(); + let signature_recover = sign_message_recover_pk(&secp, &secret_key, &msg).unwrap(); + let serialized_signature = signature_recover.serialize_compact(); + println!("sig struct: {:?}", serialized_signature); + + let recovery_id_byte = serialized_signature.0.to_i32() as u8; + let mut serialized_with_recovery_id = serialized_signature.1.to_vec(); + serialized_with_recovery_id.push(recovery_id_byte); + let sig_hex = hex::encode(serialized_with_recovery_id); + let msg_hex = hex::encode(msg); + println!( + "Signature with recovery ID in hex: {}, len = {},\n msg_hex: {}", + sig_hex, + sig_hex.len(), + msg_hex + ); + + let ret = verify_secp256k1_recovery_pk(&sig_hex, &msg_hex); + let recover_pubkey = recover_public_key(&secp, &signature_recover, &msg).unwrap(); + assert!(ret.is_ok()); + assert_eq!(recover_pubkey, public_key); + } +} diff 
--git a/crates/enclaves/Cargo.toml b/crates/enclaves/Cargo.toml new file mode 100644 index 0000000..5ff57f9 --- /dev/null +++ b/crates/enclaves/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "enclaves" +version = "0.1.0" +edition = "2021" + +[features] +nitro-enclaves = ["aws-nitro-enclaves-nsm-api", "aws-nitro-enclaves-attestation"] + +[dependencies] +bincode = "1.3.3" +tracing = "0.1.40" +tracing-subscriber = "0.3.18" +anyhow = { version = "1.0.79", features = ["backtrace"] } +nix = { version = "0.28.0", features = ["socket", "sched", "resource"] } +tokio = { version = "1.35.1", features = ["net", "time", "sync", "rt", "signal", "macros", "rt-multi-thread", "fs", "process", "io-util"] } +aws-nitro-enclaves-nsm-api = { version = "0.4.0", optional = true } +aws-nitro-enclaves-attestation = { git = "https://github.com/neatsys/aws-nitro-enclaves-attestation", version = "0.1.0", optional = true } \ No newline at end of file diff --git a/crates/enclaves/src/lib.rs b/crates/enclaves/src/lib.rs new file mode 100644 index 0000000..f4dc047 --- /dev/null +++ b/crates/enclaves/src/lib.rs @@ -0,0 +1 @@ +pub mod nitro_secure; diff --git a/crates/enclaves/src/nitro_secure.rs b/crates/enclaves/src/nitro_secure.rs new file mode 100644 index 0000000..5f61af9 --- /dev/null +++ b/crates/enclaves/src/nitro_secure.rs @@ -0,0 +1,157 @@ +use std::sync::Arc; +use std::{future::Future, pin::Pin}; +use tokio::sync::mpsc::UnboundedSender; +use tracing::warn; + +/// HandleFn is the handler that runs behind the vsock. 
+/// params: input_buf, nsm (nitro secure module), pcrs, write_sender(reply sender) +pub type HandleFn = Arc< + dyn Fn( + Vec, + Arc, + Arc<[Vec; 3]>, + UnboundedSender>, + ) -> Pin> + Send>> + + Send + + Sync, +>; + +#[derive(Debug)] +pub struct NitroSecureModule(pub i32); + +#[cfg(feature = "nitro-enclaves")] +impl NitroSecureModule { + fn new() -> anyhow::Result { + let fd = aws_nitro_enclaves_nsm_api::driver::nsm_init(); + anyhow::ensure!(fd >= 0); + Ok(Self(fd)) + } + + pub fn process_attestation(&self, user_data: Vec) -> anyhow::Result> { + use aws_nitro_enclaves_nsm_api::api::Request::Attestation; + // some silly code to avoid explicitly mention `serde_bytes::ByteBuf` + let mut request = Attestation { + user_data: Some(Default::default()), + nonce: None, + public_key: None, + }; + let Attestation { + user_data: Some(buf), + .. + } = &mut request + else { + anyhow::bail!("user_data is None in Attestation request"); + }; + + buf.extend(user_data); + match aws_nitro_enclaves_nsm_api::driver::nsm_process_request(self.0, request) { + aws_nitro_enclaves_nsm_api::api::Response::Attestation { document } => Ok(document), + aws_nitro_enclaves_nsm_api::api::Response::Error(err) => anyhow::bail!("{err:?}"), + response => anyhow::bail!("Unexpected response: {:?}", response), + } + } + + fn describe_pcr(&self, index: u16) -> anyhow::Result> { + use aws_nitro_enclaves_nsm_api::api::Request::DescribePCR; + match aws_nitro_enclaves_nsm_api::driver::nsm_process_request(self.0, DescribePCR { index }) + { + aws_nitro_enclaves_nsm_api::api::Response::DescribePCR { lock: _, data } => Ok(data), + aws_nitro_enclaves_nsm_api::api::Response::Error(err) => anyhow::bail!("{err:?}"), + response => anyhow::bail!("Unexpected response: {:?}", response), + } + } + + pub async fn run(port: u32, handler: HandleFn) -> anyhow::Result<()> { + use std::os::fd::AsRawFd; + + use nix::sys::socket::{ + bind, listen, socket, AddressFamily, Backlog, SockFlag, SockType, VsockAddr, + }; + use tokio::{ + 
io::{AsyncReadExt as _, AsyncWriteExt as _}, + sync::mpsc::unbounded_channel, + }; + + let nsm = std::sync::Arc::new(Self::new()?); + let pcrs = Arc::new([ + nsm.describe_pcr(0)?, + nsm.describe_pcr(1)?, + nsm.describe_pcr(2)?, + ]); + + let socket_fd = socket( + AddressFamily::Vsock, + SockType::Stream, + SockFlag::empty(), + None, + )?; + bind(socket_fd.as_raw_fd(), &VsockAddr::new(0xFFFFFFFF, port))?; + // theoretically this is the earliest point to entering Tokio world, but i don't want to go + // unsafe with `FromRawFd`, and Tokio don't have a `From` yet + listen(&socket_fd, Backlog::new(64)?)?; + let socket = std::os::unix::net::UnixListener::from(socket_fd); + socket.set_nonblocking(true)?; + let socket = tokio::net::UnixListener::from_std(socket)?; + + loop { + let (stream, _) = socket.accept().await?; + let (mut read_half, mut write_half) = stream.into_split(); + let (write_sender, mut write_receiver) = unbounded_channel::>(); + + let mut write_session = tokio::spawn(async move { + while let Some(buf) = write_receiver.recv().await { + write_half.write_u64_le(buf.len() as _).await?; + write_half.write_all(&buf).await?; + } + anyhow::Ok(()) + }); + let nsm = nsm.clone(); + let pcrs = pcrs.clone(); + let handler = handler.clone(); + let mut read_session = tokio::spawn(async move { + loop { + let task = async { + let len = read_half.read_u64_le().await?; + let mut buf = vec![0; len as _]; + read_half.read_exact(&mut buf).await?; + anyhow::Ok(buf) + }; + let buf = match task.await { + Ok(buf) => buf, + Err(err) => { + warn!("{err}"); + return anyhow::Ok(()); + } + }; + let nsm_clone = nsm.clone(); + let pcrs_clone = pcrs.clone(); + let write_sender = write_sender.clone(); + let handler = handler.clone(); + tokio::spawn(async move { + if let Err(err) = handler(buf, nsm_clone, pcrs_clone, write_sender).await { + eprintln!("Error: {:?}", err); + } + }); + } + }); + // this loop keeps one connect, and still works when meets some error in only connect. 
+ loop { + let result = tokio::select! { + result = &mut read_session, if !read_session.is_finished() => result, + result = &mut write_session, if !write_session.is_finished() => result, + else => break, + }; + if let Err(err) = result.map_err(Into::into).and_then(std::convert::identity) { + warn!("{err}") + } + } + } + } +} + +#[cfg(feature = "nitro-enclaves")] +impl Drop for NitroSecureModule { + fn drop(&mut self) { + aws_nitro_enclaves_nsm_api::driver::nsm_exit(self.0) + } +} diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml new file mode 100644 index 0000000..33853e4 --- /dev/null +++ b/crates/types/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "types" +version = "0.1.0" +edition = "2021" + +[dependencies] +derive_more = "0.99.17" +derive-where = "1.2.7" +serde_json = "1.0.114" +serde = { version = "1.0.195", features = ["derive"] } diff --git a/crates/types/src/configuration.rs b/crates/types/src/configuration.rs new file mode 100644 index 0000000..862aa0e --- /dev/null +++ b/crates/types/src/configuration.rs @@ -0,0 +1,9 @@ + +/// Networks consts +/// Suggested buffer size +pub const DEFAULT_MAX_DATAGRAM_SIZE: usize = 65507; + +/// Round number of a block. +pub type Round = u64; + +pub const GENESIS_ROUND: Round = 0; \ No newline at end of file diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs new file mode 100644 index 0000000..9277d13 --- /dev/null +++ b/crates/types/src/lib.rs @@ -0,0 +1,2 @@ +pub mod raw_wrapper; +pub mod configuration; \ No newline at end of file diff --git a/crates/types/src/raw_wrapper.rs b/crates/types/src/raw_wrapper.rs new file mode 100644 index 0000000..b2bf817 --- /dev/null +++ b/crates/types/src/raw_wrapper.rs @@ -0,0 +1,32 @@ +use std::{fmt::Debug, hash::Hash}; +use serde::{Deserialize, Serialize}; + +/// Payload is the vec but derive many traits, like hash, debug, clone, etc. 
+#[derive( + Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, derive_more::Deref, Serialize, Deserialize, +)] +pub struct Payload(pub Vec); + +impl Debug for Payload { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Ok(s) = std::str::from_utf8(&self.0) { + write!(f, "Payload(\"{s}\")") + } else { + write!( + f, + "Payload({}{})", + self.0 + .iter() + .map(|b| format!("{b:02x}")) + .take(32) + .collect::>() + .concat(), + if self.0.len() > 32 { + format!(".. ", self.0.len()) + } else { + String::new() + } + ) + } + } +} \ No newline at end of file diff --git a/crates/vlc/Cargo.toml b/crates/vlc/Cargo.toml index 7869f75..e699240 100644 --- a/crates/vlc/Cargo.toml +++ b/crates/vlc/Cargo.toml @@ -6,4 +6,20 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +sha2 = "0.10.8" +sha3 = "0.10.1" +rand = "0.8.5" +rand_distr = "0.4.3" +bincode = "1.3.3" +tracing = "0.1.40" +futures = "0.3.30" +num_cpus = "1.13.1" +derive_more = "0.99.17" +derive-where = "1.2.7" serde = { version = "1", features = ["derive"] } +anyhow = { version = "1.0.79", features = ["backtrace"] } +tracing-subscriber = "0.3.18" +secp256k1 = { version = "0.29.0", features = ["rand-std", "serde", "recovery"] } +tokio = { version = "1.35.1", features = ["net", "time", "sync", "rt", "signal", "macros", "rt-multi-thread", "fs", "process", "io-util"] } +tokio-util = "0.7.10" +crypto ={ path = "../crypto", version = "0.1.0"} diff --git a/crates/vlc/src/lib.rs b/crates/vlc/src/lib.rs index aaa6e9c..a5f1827 100644 --- a/crates/vlc/src/lib.rs +++ b/crates/vlc/src/lib.rs @@ -2,8 +2,9 @@ //! //! This crate implements a verifiable logical clock construct. The clock //! can be used in a peer-to-peer network to order events. Any node in the -//! network can verify the correctness of the clock. - +//! network can verify the correctness of the clock. And HashMap as its core +//! data structure. 
+pub mod ordinary_clock; use serde::{Deserialize, Serialize}; use std::cmp; use std::collections::HashMap; diff --git a/crates/vlc/src/ordinary_clock.rs b/crates/vlc/src/ordinary_clock.rs new file mode 100644 index 0000000..c1c8d24 --- /dev/null +++ b/crates/vlc/src/ordinary_clock.rs @@ -0,0 +1,388 @@ +//! This clock use the BTreeMap as its core data structure. + +use std::{cmp::Ordering, collections::BTreeMap}; +use serde::{Deserialize, Serialize}; +use sha2::{Sha256, Digest}; +use bincode::Options; + +pub trait Clock: PartialOrd + Clone + Send + Sync + 'static { + fn reduce(&self) -> LamportClock; +} + +pub type LamportClock = u64; + +impl Clock for LamportClock { + fn reduce(&self) -> LamportClock { + *self + } +} + +/// clock key_id +pub type KeyId = u64; + +#[derive( + Debug, Clone, PartialEq, Eq, Hash, Default, derive_more::Deref, Serialize, Deserialize, +)] +pub struct OrdinaryClock(pub BTreeMap); + +impl AsRef for OrdinaryClock { + fn as_ref(&self) -> &OrdinaryClock { + self + } +} + +impl OrdinaryClock { + pub fn new() -> Self { + Self::default() + } + + pub fn is_genesis(&self) -> bool { + self.0.values().all(|n| *n == 0) + } + + fn merge(&self, other: &Self) -> Self { + let merged = self + .0 + .keys() + .chain(other.0.keys()) + .map(|id| { + let n = match (self.0.get(id), other.0.get(id)) { + (Some(n), Some(other_n)) => (*n).max(*other_n), + (Some(n), None) | (None, Some(n)) => *n, + (None, None) => unreachable!(), + }; + (*id, n) + }) + .collect(); + Self(merged) + } + + pub fn update<'a>(&'a self, others: impl Iterator, id: u64) -> Self { + let mut updated = others.fold(self.clone(), |version, dep| version.merge(dep)); + *updated.0.entry(id).or_default() += 1; + updated + } + + pub fn base<'a>(others: impl Iterator) -> Self { + let mut combined = BTreeMap::new(); + + for clock in others { + for (&key, &value) in &clock.0 { + combined + .entry(key) + .and_modify(|e: &mut u64| *e = (*e).min(value)) + .or_insert(value); + } + } + + 
OrdinaryClock(combined) + } + + pub fn calculate_sha256(&self) -> [u8; 32] { + let mut hasher = Sha256::new(); + let data = bincode::options().serialize(&self.0).expect("Failed to serialize data"); + // Update the hasher with the JSON string + hasher.update(data); + + // Calculate the hash & return bytes + hasher.finalize().into() + } +} + +impl PartialOrd for OrdinaryClock { + fn partial_cmp(&self, other: &Self) -> Option { + fn ge(clock: &OrdinaryClock, other_clock: &OrdinaryClock) -> bool { + for (other_id, other_n) in &other_clock.0 { + if *other_n == 0 { + continue; + } + let Some(n) = clock.0.get(other_id) else { + return false; + }; + if n < other_n { + return false; + } + } + true + } + match (ge(self, other), ge(other, self)) { + (true, true) => Some(Ordering::Equal), + (true, false) => Some(Ordering::Greater), + (false, true) => Some(Ordering::Less), + (false, false) => None, + } + } +} + +impl OrdinaryClock { + pub fn dep_cmp(&self, other: &Self, id: KeyId) -> Ordering { + match (self.0.get(&id), other.0.get(&id)) { + // disabling this check after the definition of genesis clock has been extended + // haven't revealed any bug with this assertion before, hopefully disabling it will not + // hide any bug in the future as well + (None, Some(_)) => Ordering::Less, + (Some(_), None) => Ordering::Greater, + // this can happen on the startup insertion + (None, None) => Ordering::Equal, + (Some(n), Some(m)) => n.cmp(m), + } + } +} + +impl Clock for OrdinaryClock { + fn reduce(&self) -> LamportClock { + self.0.values().copied().sum() + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use std::{sync::{atomic::{AtomicUsize, Ordering}, Arc}, time::{Duration, Instant}}; + use rand::rngs::OsRng; + use futures::future::join_all; + use tokio::runtime::Builder; + use crypto::{core::DigestHash, recovery::{recover_public_key, sign_message_recover_pk}}; + + + #[test] + fn default_is_genesis() -> anyhow::Result<()> { + 
anyhow::ensure!(OrdinaryClock::default().is_genesis()); + Ok(()) + } + + #[test] + fn test_clock_base_func() -> anyhow::Result<()> { + let mut clock1 = BTreeMap::new(); + clock1.insert(1, 10); + clock1.insert(2, 0); + clock1.insert(3, 5); + + let mut clock2 = BTreeMap::new(); + clock2.insert(1, 0); + clock2.insert(2, 20); + clock2.insert(3, 2); + + let mut clock3 = BTreeMap::new(); + clock3.insert(1, 7); + clock3.insert(2, 15); + clock3.insert(4, 8); + + let oc1 = OrdinaryClock(clock1); + let oc2 = OrdinaryClock(clock2); + let oc3 = OrdinaryClock(clock3); + + let clocks = vec![&oc1, &oc2, &oc3]; + let base_clock = OrdinaryClock::base(clocks.into_iter()); + println!("{:?}", base_clock); // Should print: OrdinaryClock({1: 0, 2: 0, 3: 2, 4: 8}) + assert_eq!(base_clock, OrdinaryClock(BTreeMap::from([(1, 0), (2, 0), (3, 2), (4, 8)]))); + Ok(()) + } + + #[test] + fn clock_sha256() -> anyhow::Result<()> { + let mut clock = OrdinaryClock((0..4).map(|i| (i as _, 0)).collect()); + clock = clock.update(vec![OrdinaryClock::default()].iter(), 0); + println!("{:?}, {:?}", clock, clock.calculate_sha256()); + + // Tips: when clock is hashmap, this serialize and sha256 can't reproduce, every time is different. 
+ Ok(()) + } + + #[test] + #[ignore] + fn hash_big_clock_sha256() -> anyhow::Result<()> { + let clock = OrdinaryClock((0..1<<27).map(|i| (i as _, 0)).collect()); + let start_time = Instant::now(); + let clock_hash = clock.sha256().to_fixed_bytes(); + println!("{:?}, {:?}", clock_hash, start_time.elapsed()); + Ok(()) + } + + #[tokio::test] + #[ignore] + async fn stress_raw_update() -> anyhow::Result<()> { + for size in (0..=12).step_by(2).map(|n| 1 << n) { + let num_merged = 0; + let clock = OrdinaryClock((0..size).map(|i| (i as _, 0)).collect()); + + let mut count = 0; + let start_time = Instant::now(); + let close_loops_session = async { + let mut current_clock = clock.clone(); + loop { + if start_time.elapsed() >= Duration::from_secs(10) { + break; + } + + let updated_clock = current_clock.update(vec![clock.clone(); num_merged].iter(), 0); + count += 1; + current_clock = updated_clock; + } + anyhow::Ok(()) + }; + + close_loops_session.await?; + println!( + "key {size},merged {num_merged}, tps {}", + count as f32 / 10. 
+ ); + } + Ok(()) + } + + #[tokio::test] + #[ignore] + async fn stress_raw_update_concurrency() -> anyhow::Result<()> { + let core = num_cpus::get(); + let rt = Arc::new(Builder::new_multi_thread() + .worker_threads(core) + .build() + .unwrap()); + + for size in (0..=12).step_by(2).map(|n| 1 << n) { + let count = Arc::new(AtomicUsize::new(0)); + let mut tasks = Vec::new(); + let mut shifts: Vec = Vec::with_capacity(core); + for _ in 0..core { + shifts.push(size); + } + for size in shifts { + let num_merged = 0; + let clock = OrdinaryClock((0..size).map(|i| (i as _, 0)).collect()); + + let count_clone = Arc::clone(&count); + let start_time = Instant::now(); + let close_loops_session = async move { + // different clocks in different threads + let mut current_clock = clock.clone(); + loop { + if start_time.elapsed() >= Duration::from_secs(10) { + break; + } + + let updated_clock = current_clock.update(vec![clock.clone(); num_merged].iter(), 0); + count_clone.fetch_add(1, Ordering::Relaxed); + current_clock = updated_clock; + } + current_clock + }; + tasks.push(rt.spawn(close_loops_session)); + } + let results = join_all(tasks).await; + for result in results { + let clock = result?; + println!("key: {}, clock: {:?}", size, clock.0.get(&0)); + } + + println!( + "key {}, merged 0, tps {}", + size, + count.load(Ordering::Relaxed) as f32 / 10. 
+ ); + } + + // Shutdown Runtime + Arc::try_unwrap(rt).unwrap().shutdown_background(); + + Ok(()) + } + + #[tokio::test] + #[ignore] + async fn stress_verify_update() -> anyhow::Result<()> { + use DigestHash as _; + + let secp = secp256k1::Secp256k1::new(); + let (secret_key, public_key) = secp.generate_keypair(&mut OsRng); + + for size in (0..=12).step_by(2).map(|n| 1 << n) { + let num_merged = 0; + let clock = OrdinaryClock((0..size).map(|i| (i as _, 0)).collect()); + let clock_hash = clock.sha256().to_fixed_bytes(); + let mut count = 0; + + // sign once + let signature_recover = sign_message_recover_pk(&secp, &secret_key, &clock.sha256().to_fixed_bytes()).unwrap(); + + let start_time = Instant::now(); + let close_loops_session = async { + let mut current_clock = clock.clone(); + loop { + if start_time.elapsed() >= Duration::from_secs(10) { + break; + } + + // verify + let recover_pubkey = recover_public_key(&secp, &signature_recover, &clock_hash).unwrap(); + assert_eq!(recover_pubkey, public_key); + + // update + let updated_clock = current_clock.update(vec![clock.clone(); num_merged].iter(), 0); + count += 1; + current_clock = updated_clock; + } + anyhow::Ok(()) + }; + + close_loops_session.await?; + println!( + "key {size},merged {num_merged}, tps {}", + count as f32 / 10. 
+ ); + } + Ok(()) + } + + #[tokio::test] + #[ignore] + async fn stress_signature_verify_update() -> anyhow::Result<()> { + use DigestHash as _; + + let secp = secp256k1::Secp256k1::new(); + let (secret_key, public_key) = secp.generate_keypair(&mut OsRng); + + for size in (0..=12).step_by(2).map(|n| 1 << n) { + let num_merged = 0; + let clock = OrdinaryClock((0..size).map(|i| (i as _, 0)).collect()); + + let mut count = 0; + let mut signatures = None; + let start_time = Instant::now(); + let close_loops_session = async { + let mut current_clock = clock.clone(); + loop { + if start_time.elapsed() >= Duration::from_secs(10) { + break; + } + + // verify + if !signatures.is_none() { + let clock_hash = current_clock.sha256().to_fixed_bytes(); + let recover_pubkey = recover_public_key(&secp, &signatures.unwrap(), &clock_hash).unwrap(); + assert_eq!(recover_pubkey, public_key); + } + + // update + let updated_clock = current_clock.update(vec![clock.clone(); num_merged].iter(), 0); + count += 1; + current_clock = updated_clock; + + // sign + let signature_recover = sign_message_recover_pk(&secp, &secret_key, ¤t_clock.sha256().to_fixed_bytes()); + signatures = Some(signature_recover.unwrap()); + } + anyhow::Ok(()) + }; + + close_loops_session.await?; + println!( + "key {size},merged {num_merged}, tps {}", + count as f32 / 10. 
+ ); + } + Ok(()) + } + +} \ No newline at end of file diff --git a/crates/vrf/Cargo.toml b/crates/vrf/Cargo.toml new file mode 100644 index 0000000..408934a --- /dev/null +++ b/crates/vrf/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "vrf" +version = "0.1.0" +edition = "2021" + +[dependencies] +num-bigint = "0.4.6" +num-traits = "0.2.19" +bincode = "1.1.1" +bytes = "1.6.1" +curve25519-dalek = "4.1.3" +derive_deref = "1.0.2" +anyhow = { version = "1.0.79", features = ["backtrace"] } +failure = "0.1.3" +thiserror = "1.0.63" +ed25519-dalek = { version = "2.1.1", features = ["serde", "digest", "rand_core", "pem", "pkcs8"] } +hex = "0.4.3" +lazy_static = "1.3.0" +proptest = "1.5.0" +proptest-derive = "0.5.0" +rand = { version = "0.8.5" } +rand_core = "0.6.4" +serde = { version = "1.0.89", features = ["derive"] } diff --git a/crates/vrf/src/ecvrf.rs b/crates/vrf/src/ecvrf.rs new file mode 100644 index 0000000..4ad2c53 --- /dev/null +++ b/crates/vrf/src/ecvrf.rs @@ -0,0 +1,336 @@ +//! This module implements an instantiation of a verifiable random function known as +//! [ECVRF-ED25519-SHA512-TAI](https://tools.ietf.org/html/draft-irtf-cfrg-vrf-04). +//! +//! # Examples +//! +//! ``` +//! use vrf::{traits::Uniform, ecvrf::*}; +//! use rand::{rngs::StdRng, SeedableRng}; +//! +//! let message = b"Test message"; +//! let mut rng: StdRng = SeedableRng::from_seed([0; 32]); +//! let private_key = VRFPrivateKey::generate_for_testing(&mut rng); +//! let public_key: VRFPublicKey = (&private_key).into(); +//! ``` +//! **Note**: The above example generates a private key using a private function intended only for +//! testing purposes. Production code should find an alternate means for secure key generation. +//! +//! Produce a proof for a message from a `VRFPrivateKey`, and verify the proof and message +//! using a `VRFPublicKey`: +//! +//! ``` +//! # use vrf::{traits::Uniform, ecvrf::*}; +//! # use rand::{rngs::StdRng, SeedableRng}; +//! # let message = b"Test message"; +//! 
# let mut rng: StdRng = SeedableRng::from_seed([0; 32]); +//! # let private_key = VRFPrivateKey::generate_for_testing(&mut rng); +//! # let public_key: VRFPublicKey = (&private_key).into(); +//! let proof = private_key.prove(message); +//! assert!(public_key.verify(&proof, message).is_ok()); +//! ``` +//! +//! Produce a pseudorandom output from a `Proof`: +//! +//! ``` +//! # use vrf::{traits::Uniform, ecvrf::*}; +//! # use rand::{rngs::StdRng, SeedableRng}; +//! # let message = b"Test message"; +//! # let mut rng: StdRng = SeedableRng::from_seed([0; 32]); +//! # let private_key = VRFPrivateKey::generate_for_testing(&mut rng); +//! # let public_key: VRFPublicKey = (&private_key).into(); +//! # let proof = private_key.prove(message); +//! let output: Output = (&proof).into(); +//! ``` + +use crate::traits::*; +use core::convert::TryFrom; +use curve25519_dalek::{ + constants::ED25519_BASEPOINT_POINT, digest::Update, edwards::{CompressedEdwardsY, EdwardsPoint}, scalar::Scalar as ed25519_Scalar +}; +use derive_deref::Deref; +use ed25519_dalek::{ + self, Digest, VerifyingKey as ed25519_PublicKey, SecretKey as ed25519_PrivateKey, SigningKey, Sha512, +}; +use serde::{Deserialize, Serialize}; + +const SUITE: u8 = 0x03; +const ONE: u8 = 0x01; +const TWO: u8 = 0x02; +const THREE: u8 = 0x03; + +/// The number of bytes of [`Output`] +pub const OUTPUT_LENGTH: usize = 64; +/// The number of bytes of [`Proof`] +pub const PROOF_LENGTH: usize = 80; + +/// An ECVRF private key +#[derive(Serialize, Deserialize, Deref, Debug)] +pub struct VRFPrivateKey(ed25519_PrivateKey); + +/// An ECVRF public key +#[derive(Serialize, Deserialize, Deref, Debug, PartialEq, Eq)] +pub struct VRFPublicKey(ed25519_PublicKey); + +/// A longer private key which is slightly optimized for proof generation. +/// +/// This is similar in structure to ed25519_dalek::ExpandedSecretKey. It can be produced from +/// a VRFPrivateKey. 
+pub struct VRFExpandedPrivateKey { + pub(super) key: ed25519_Scalar, + pub(super) nonce: [u8; 32], +} + +impl VRFPrivateKey { + /// Produces a proof for an input (using the private key) + pub fn prove(&self, alpha: &[u8]) -> Proof { + let sign_key = SigningKey::from_bytes(&self.0); + VRFExpandedPrivateKey::from(self).prove(&VRFPublicKey(sign_key.verifying_key()), alpha) + } +} + +impl VRFExpandedPrivateKey { + /// Produces a proof for an input (using the expanded private key) + pub fn prove(&self, pk: &VRFPublicKey, alpha: &[u8]) -> Proof { + let h_point = pk.hash_to_curve(alpha); + let k_scalar = + ed25519_Scalar::from_bytes_mod_order_wide(&nonce_generation_bytes(self.nonce, h_point)); + let gamma = h_point * self.key; + let c_scalar = hash_points(&[ + h_point, + gamma, + ED25519_BASEPOINT_POINT * k_scalar, + h_point * k_scalar, + ]); + + Proof { + gamma, + c: c_scalar, + s: k_scalar + c_scalar * self.key, + } + } +} + +impl Uniform for VRFPrivateKey { + fn generate_for_testing(rng: &mut R) -> Self + where + R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng, + { + let sign_key = SigningKey::generate(rng); + VRFPrivateKey(sign_key.to_bytes()) + } +} + +impl VRFPrivateKey { + pub fn generate_keypair(rng: &mut R) -> Self + where + R: ::rand::RngCore + ::rand::CryptoRng, + { + let sign_key = SigningKey::generate(rng); + VRFPrivateKey(sign_key.to_bytes()) + } +} + +impl TryFrom<&[u8]> for VRFPrivateKey { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> std::result::Result { + let mut bits: [u8; 32] = [0u8; 32]; + bits.copy_from_slice(&bytes[..32]); + let sign_key = SigningKey::from_bytes(&bits); + Ok(VRFPrivateKey( + sign_key.to_bytes(), + )) + } +} + +impl TryFrom<&[u8]> for VRFPublicKey { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> std::result::Result { + if bytes.len() != ed25519_dalek::PUBLIC_KEY_LENGTH { + return Err(CryptoMaterialError::WrongLengthError); + } + + let mut bits: [u8; 32] = [0u8; 32]; + 
bits.copy_from_slice(&bytes[..32]); + + let compressed = curve25519_dalek::edwards::CompressedEdwardsY(bits); + let point = compressed + .decompress() + .ok_or(CryptoMaterialError::DeserializationError)?; + + // Check if the point lies on a small subgroup. This is required + // when using curves with a small cofactor (in ed25519, cofactor = 8). + if point.is_small_order() { + return Err(CryptoMaterialError::SmallSubgroupError); + } + + Ok(VRFPublicKey(ed25519_PublicKey::from(point))) + } +} + +impl VRFPublicKey { + /// Given a [`Proof`] and an input, returns whether or not the proof is valid for the input + /// and public key + pub fn verify(&self, proof: &Proof, alpha: &[u8]) -> Result<(), anyhow::Error> { + let h_point = self.hash_to_curve(alpha); + let pk_point = CompressedEdwardsY::from_slice(self.as_bytes()) + .unwrap() + .decompress() + .unwrap(); + let cprime = hash_points(&[ + h_point, + proof.gamma, + ED25519_BASEPOINT_POINT * proof.s - pk_point * proof.c, + h_point * proof.s - proof.gamma * proof.c, + ]); + + if proof.c == cprime { + Ok(()) + } else { + anyhow::bail!("The proof failed to verify for this public key") + } + } + + pub(super) fn hash_to_curve(&self, alpha: &[u8]) -> EdwardsPoint { + let mut result = [0u8; 32]; + let mut counter = 0; + let mut wrapped_point: Option = None; + + while wrapped_point.is_none() { + let mut hasher = Sha512::new(); + Digest::update(&mut hasher, &[SUITE, ONE]); + Digest::update(&mut hasher, self.as_bytes()); + Digest::update(&mut hasher, alpha); + Digest::update(&mut hasher, &[counter]); + let hash_result = hasher.finalize(); + + result.copy_from_slice(&hash_result[..32]); + + wrapped_point = CompressedEdwardsY::from_slice(&result).unwrap().decompress(); + counter += 1; + } + + wrapped_point.unwrap().mul_by_cofactor() + } +} + +impl<'a> From<&'a VRFPrivateKey> for VRFPublicKey { + fn from(private_key: &'a VRFPrivateKey) -> Self { + let secret: &ed25519_PrivateKey = private_key; + let sign_key = 
SigningKey::from_bytes(&secret); + let public: ed25519_PublicKey = sign_key.verifying_key(); + VRFPublicKey(public) + } +} + +impl<'a> From<&'a VRFPrivateKey> for VRFExpandedPrivateKey { + fn from(private_key: &'a VRFPrivateKey) -> Self { + let mut h: Sha512 = Sha512::default(); + let mut hash: [u8; 64] = [0u8; 64]; + let mut lower: [u8; 32] = [0u8; 32]; + let mut upper: [u8; 32] = [0u8; 32]; + + Digest::update(&mut h, &private_key.0); + hash.copy_from_slice(h.finalize().as_slice()); + + lower.copy_from_slice(&hash[00..32]); + upper.copy_from_slice(&hash[32..64]); + + lower[0] &= 248; + lower[31] &= 63; + lower[31] |= 64; + + VRFExpandedPrivateKey { + key: ed25519_Scalar::from_bytes_mod_order(lower), + nonce: upper, + } + } +} + +/// A VRF proof that can be used to validate an input with a public key +pub struct Proof { + gamma: EdwardsPoint, + c: ed25519_Scalar, + s: ed25519_Scalar, +} + +impl Proof { + /// Produces a new Proof struct from its fields + pub fn new(gamma: EdwardsPoint, c: ed25519_Scalar, s: ed25519_Scalar) -> Proof { + Proof { gamma, c, s } + } + + /// Converts a Proof into bytes + pub fn to_bytes(&self) -> [u8; PROOF_LENGTH] { + let mut ret = [0u8; PROOF_LENGTH]; + ret[..32].copy_from_slice(&self.gamma.compress().to_bytes()[..]); + ret[32..48].copy_from_slice(&self.c.to_bytes()[..16]); + ret[48..].copy_from_slice(&self.s.to_bytes()[..]); + ret + } +} + +impl TryFrom<&[u8]> for Proof { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> std::result::Result { + let mut c_buf = [0u8; 32]; + c_buf[..16].copy_from_slice(&bytes[32..48]); + let mut s_buf = [0u8; 32]; + s_buf.copy_from_slice(&bytes[48..]); + Ok(Proof { + gamma: CompressedEdwardsY::from_slice(&bytes[..32]) + .unwrap() + .decompress() + .unwrap(), + c: ed25519_Scalar::from_bytes_mod_order(c_buf), + s: ed25519_Scalar::from_bytes_mod_order(s_buf), + }) + } +} + +/// The ECVRF output produced from the proof +pub struct Output([u8; OUTPUT_LENGTH]); + +impl Output { + /// 
Converts an Output into bytes + #[inline] + pub fn to_bytes(&self) -> [u8; OUTPUT_LENGTH] { + self.0 + } +} + +impl<'a> From<&'a Proof> for Output { + fn from(proof: &'a Proof) -> Output { + let mut output = [0u8; OUTPUT_LENGTH]; + output.copy_from_slice( + &Sha512::new() + .chain(&[SUITE, THREE]) + .chain(&proof.gamma.mul_by_cofactor().compress().to_bytes()[..]) + .finalize()[..], + ); + Output(output) + } +} + +pub(super) fn nonce_generation_bytes(nonce: [u8; 32], h_point: EdwardsPoint) -> [u8; 64] { + let mut k_buf = [0u8; 64]; + k_buf.copy_from_slice( + &Sha512::new() + .chain(nonce) + .chain(h_point.compress().as_bytes()) + .finalize()[..], + ); + k_buf +} + +pub(super) fn hash_points(points: &[EdwardsPoint]) -> ed25519_Scalar { + let mut result = [0u8; 32]; + let mut hash = Sha512::new().chain(&[SUITE, TWO]); + for point in points.iter() { + hash = hash.chain(point.compress().to_bytes()); + } + result[..16].copy_from_slice(&hash.finalize()[..16]); + ed25519_Scalar::from_bytes_mod_order(result) +} diff --git a/crates/vrf/src/lib.rs b/crates/vrf/src/lib.rs new file mode 100644 index 0000000..db65bdd --- /dev/null +++ b/crates/vrf/src/lib.rs @@ -0,0 +1,27 @@ +//! This module contains implementations of a +//! [verifiable random function](https://en.wikipedia.org/wiki/Verifiable_random_function) +//! (currently only ECVRF). VRFs can be used in the consensus protocol for leader election. 
+ +pub mod ecvrf; +pub mod traits; +pub mod test_utils; +pub mod sample; + +#[cfg(test)] +mod unit_tests; + + +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/crates/vrf/src/sample.rs b/crates/vrf/src/sample.rs new file mode 100644 index 0000000..e9bd1ac --- /dev/null +++ b/crates/vrf/src/sample.rs @@ -0,0 +1,54 @@ +use num_bigint::BigUint; +use num_traits::{One, Num}; + +#[derive(Debug, Clone, Copy)] +pub struct Sampler { + precision: usize +} + +impl Sampler { + pub fn new(precision: usize) -> Self { + Sampler { + precision + } + } + + pub fn hex_to_biguint(hex_str: &str) -> BigUint { + BigUint::from_str_radix(hex_str, 16).expect("Invalid hex string") + } + + pub fn calculate_threshold(&self, probability: f64) -> BigUint { + // percision is based at bit + let max_output = BigUint::one() << self.precision; + let threshold = max_output * BigUint::from((probability * 100.0) as u64) / BigUint::from(100u64); + threshold + } + + pub fn meets_threshold(&self, output: &BigUint, threshold: &BigUint) -> bool { + output < threshold + } +} + + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn meets() { + let sampler = Sampler::new(512); + let vrf_output_hex = "a64c292ec45f6b252828aff9a02a0fe88d2fcc7f5fc61bb328f03f4c6c0657a9d26efb23b87647ff54f71cd51a6fa4c4e31661d8f72b41ff00ac4d2eec2ea7b3"; + let vrf_output = Sampler::hex_to_biguint(vrf_output_hex); + + let target_probability = 0.1; + let threshold = sampler.calculate_threshold(target_probability); + let meets = sampler.meets_threshold(&vrf_output, &threshold); + if meets { + println!("Node is selected."); + } else { + println!("Node is not selected."); + } + + assert_eq!(meets, false); + } +} \ No newline at end of file diff --git a/crates/vrf/src/test_utils.rs b/crates/vrf/src/test_utils.rs new file mode 100644 index 0000000..0812cd6 --- /dev/null +++ 
b/crates/vrf/src/test_utils.rs @@ -0,0 +1,53 @@ +//! Internal module containing convenience utility functions mainly for testing + +use crate::traits::Uniform; +use bincode::serialize; +use serde::Serialize; + +/// A keypair consisting of a private and public key +#[derive(Clone)] +pub struct KeyPair +where + for<'a> P: From<&'a S>, +{ + pub private_key: S, + pub public_key: P, +} + +impl From for KeyPair +where + for<'a> P: From<&'a S>, +{ + fn from(private_key: S) -> Self { + KeyPair { + public_key: (&private_key).into(), + private_key, + } + } +} + +impl Uniform for KeyPair +where + S: Uniform, + for<'a> P: From<&'a S>, +{ + fn generate_for_testing(rng: &mut R) -> Self + where + R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng, + { + let private_key = S::generate_for_testing(rng); + private_key.into() + } +} + +impl std::fmt::Debug for KeyPair +where + Priv: Serialize, + Pub: Serialize + for<'a> From<&'a Priv>, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut v = serialize(&self.private_key).unwrap(); + v.extend(&serialize(&self.public_key).unwrap()); + write!(f, "{}", hex::encode(&v[..])) + } +} diff --git a/crates/vrf/src/traits.rs b/crates/vrf/src/traits.rs new file mode 100644 index 0000000..daf83e1 --- /dev/null +++ b/crates/vrf/src/traits.rs @@ -0,0 +1,221 @@ +//! This module provides a generic set of traits for dealing with cryptographic primitives. +//! +//! For examples on how to use these traits, see the implementations of the [`ed25519`] or +//! [`bls12381`] modules. 
+ +use std::fmt; +use core::convert::{From, TryFrom}; +use proptest_derive::Arbitrary; +use serde::{Deserialize, Serialize}; +use std::{fmt::Debug, hash::Hash}; + +#[derive(Clone, Debug, Copy, Eq, Hash, PartialEq, Serialize, Deserialize, PartialOrd, Ord, Arbitrary)] +pub struct HashValue { + hash: [u8; 32], +} + +impl AsRef<[u8; 32]> for HashValue { + fn as_ref(&self) -> &[u8; 32] { + &self.hash + } +} + +/// An error type for key and signature validation issues, see [`ValidKey`][ValidKey]. +/// +/// This enum reflects there are two interesting causes of validation +/// failure for the ingestion of key or signature material: deserialization errors +/// (often, due to mangled material or curve equation failure for ECC) and +/// validation errors (material recognizable but unacceptable for use, +/// e.g. unsafe). +#[derive(Clone, Debug, PartialEq, Eq, thiserror::Error)] +pub enum CryptoMaterialError { + /// Key or signature material does not deserialize correctly. + #[error("DeserializationError")] + DeserializationError, + /// Key or signature material deserializes, but is otherwise not valid. + #[error("ValidationError")] + ValidationError, + /// Key or signature material does not have the expected size. + #[error("WrongLengthError")] + WrongLengthError, + /// Part of the signature or key is not canonical resulting to malleability issues. + #[error("CanonicalRepresentationError")] + CanonicalRepresentationError, + /// A curve point (i.e., a public key) lies on a small group. + #[error("SmallSubgroupError")] + SmallSubgroupError, + /// A curve point (i.e., a public key) does not satisfy the curve equation. + #[error("PointNotOnCurveError")] + PointNotOnCurveError, +} + +/// Key material with a notion of byte validation. +/// +/// A type family for material that knows how to serialize and +/// deserialize, as well as validate byte-encoded material. 
The +/// validation must be implemented as a [`TryFrom`][TryFrom] which +/// classifies its failures against the above +/// [`CryptoMaterialError`][CryptoMaterialError]. +/// +/// This provides an implementation for a validation that relies on a +/// round-trip to bytes and corresponding [`TryFrom`][TryFrom]. +pub trait ValidKey: + // The for<'a> exactly matches the assumption "deserializable from any lifetime". + for<'a> TryFrom<&'a [u8], Error = CryptoMaterialError> + Debug +{ + /// TryFrom is the source of truth on whether we can build a valid key. + /// => we can use it once we've built, to validate! + fn validate(&self) -> std::result::Result<(), CryptoMaterialError> { + Self::try_from(self.to_bytes().as_slice())?; + Ok(()) + } + + /// Convert the valid key to bytes. + fn to_bytes(&self) -> Vec; +} + +/// An extension to to/from Strings for [`ValidKey`][ValidKey]. +/// +/// Relies on [`hex`][::hex] for string encoding / decoding. +/// No required fields, provides a default implementation. +pub trait ValidKeyStringExt: ValidKey { + /// When trying to convert from bytes, we simply decode the string into + /// bytes before checking if we can convert. + fn from_encoded_string(encoded_str: &str) -> std::result::Result { + let bytes_out = ::hex::decode(encoded_str); + // We defer to `try_from` to make sure we only produce valid keys. + bytes_out + // We reinterpret a failure to serialize: key is mangled someway. + .or(Err(CryptoMaterialError::DeserializationError)) + .and_then(|ref bytes| Self::try_from(bytes)) + } + /// A function to encode into hex-string after serializing. + fn to_encoded_string(&self) -> Result { + Ok(::hex::encode(&self.to_bytes())) + } +} + +// There's nothing required in this extension, so let's just derive it +// for anybody that has a ValidKey. +impl ValidKeyStringExt for T {} + +/// A type family for key material that should remain secret and has an +/// associated type of the [`PublicKey`][PublicKey] family. 
+pub trait PrivateKey: ValidKey { + /// We require public / private types to be coupled, i.e. their + /// associated type is each other. + type PublicKeyMaterial: PublicKey; +} + +/// A type family of valid keys that know how to sign. +/// +/// A trait for a [`ValidKey`][ValidKey] which knows how to sign a +/// message, and return an associated `Signature` type. +pub trait SigningKey: + PrivateKey::VerifyingKeyMaterial> +{ + /// The associated verifying key for this signing key. + type VerifyingKeyMaterial: VerifyingKey; + /// The associated signature for this signing key. + type SignatureMaterial: Signature; + + /// Signs an input message. + fn sign_message(&self, message: &HashValue) -> Self::SignatureMaterial; +} + +/// A type for key material that can be publicly shared, and in asymmetric +/// fashion, can be obtained from a [`PrivateKey`][PrivateKey] +/// reference. +/// This convertibility requirement ensures the existence of a +/// deterministic, canonical public key construction from a private key. +pub trait PublicKey: ValidKey + Clone + Eq + Hash + + // This unsightly turbofish type parameter is the precise constraint + // needed to require that there exists an + // + // ``` + // impl From<&MyPrivateKeyMaterial> for MyPublicKeyMaterial + // ``` + // + // declaration, for any `MyPrivateKeyMaterial`, `MyPublicKeyMaterial` + // on which we register (respectively) `PublicKey` and `PrivateKey` + // implementations. + for<'a> From<&'a ::PrivateKeyMaterial> { + /// We require public / private types to be coupled, i.e. their + /// associated type is each other. + type PrivateKeyMaterial: PrivateKey; + /// The length of the [`PublicKey`] + fn length() -> usize; + +} + +/// A type family of public keys that are used for signing. +/// +/// It is linked to a type of the Signature family, which carries the +/// verification implementation. +pub trait VerifyingKey: + PublicKey::SigningKeyMaterial> +{ + /// The associated signing key for this verifying key. 
+ type SigningKeyMaterial: SigningKey; + /// The associated signature for this verifying key. + type SignatureMaterial: Signature; + + /// We provide the logical implementation which dispatches to the signature. + fn verify_signature( + &self, + message: &HashValue, + signature: &Self::SignatureMaterial, + ) -> Result<(), anyhow::Error> { + signature.verify(message, self) + } +} + +/// A type family for signature material that knows which public key type +/// is needed to verify it, and given such a public key, knows how to +/// verify. +/// +/// This trait simply requires an association to some type of the +/// [`PublicKey`][PublicKey] family of which we are the `SignatureMaterial`. +/// +/// It should be possible to write a generic signature function that +/// checks signature material passed as `&[u8]` and only returns Ok when +/// that material de-serializes to a signature of the expected concrete +/// scheme. This would be done as an extension trait of +/// [`Signature`][Signature]. +pub trait Signature: + for<'a> TryFrom<&'a [u8], Error = CryptoMaterialError> + Sized + Debug + Clone + Eq + Hash +{ + /// The associated verifying key for this signature. + type VerifyingKeyMaterial: VerifyingKey; + + /// The verification function. + fn verify(&self, message: &HashValue, public_key: &Self::VerifyingKeyMaterial) -> Result<(), anyhow::Error>; + + /// Native verification function. + fn verify_arbitrary_msg( + &self, + message: &[u8], + public_key: &Self::VerifyingKeyMaterial, + ) -> Result<(), anyhow::Error>; + + /// Convert the signature into a byte representation. + fn to_bytes(&self) -> Vec; +} + +/// An alias for the RNG used in the [`Uniform`] trait. +// pub trait SeedableCryptoRng = ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng; + +/// A type family for schemes which know how to generate key material from +/// a cryptographically-secure [`CryptoRng`][::rand::CryptoRng]. 
+pub trait Uniform { + /// Generate key material from an RNG for testing purposes. + fn generate_for_testing(rng: &mut R) -> Self + where + R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng; +} + +/// A type family with a by-convention notion of genesis private key. +pub trait Genesis: PrivateKey { + /// Produces the genesis private key. + fn genesis() -> Self; +} diff --git a/crates/vrf/src/unit_tests/mod.rs b/crates/vrf/src/unit_tests/mod.rs new file mode 100644 index 0000000..a35f50c --- /dev/null +++ b/crates/vrf/src/unit_tests/mod.rs @@ -0,0 +1,26 @@ +mod vrf_test; + +use crate::{ + test_utils::KeyPair, + traits::Uniform, +}; +use proptest::prelude::*; +use rand::rngs::StdRng; +use rand::SeedableRng; +use serde::Serialize; + +/// Produces a uniformly random keypair from a seed +pub(super) fn uniform_keypair_strategy() -> impl Strategy> +where + Pub: Serialize + for<'a> From<&'a Priv>, + Priv: Serialize + Uniform, +{ + // The no_shrink is because keypairs should be fixed -- shrinking would cause a different + // keypair to be generated, which appears to not be very useful. + any::<[u8; 32]>() + .prop_map(|seed| { + let mut rng = StdRng::from_seed(seed); + KeyPair::::generate_for_testing(&mut rng) + }) + .no_shrink() +} diff --git a/crates/vrf/src/unit_tests/vrf_test.rs b/crates/vrf/src/unit_tests/vrf_test.rs new file mode 100644 index 0000000..e0f6959 --- /dev/null +++ b/crates/vrf/src/unit_tests/vrf_test.rs @@ -0,0 +1,202 @@ +use crate::traits::HashValue; +use crate::{unit_tests::uniform_keypair_strategy, ecvrf::*}; +use core::convert::TryFrom; +use curve25519_dalek::{ + constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY, + scalar::Scalar as ed25519_Scalar, +}; +use proptest::prelude::*; +use rand::rngs::OsRng; + +macro_rules! to_string { + ($e:expr) => { + format!("{}", ::hex::encode($e.to_bytes().as_ref())) + }; +} + +macro_rules! 
from_string { + (CompressedEdwardsY, $e:expr) => { + CompressedEdwardsY::from_slice(&::hex::decode($e).unwrap()) + .unwrap() + .decompress() + .unwrap() + }; + (VRFPublicKey, $e:expr) => {{ + let v: &[u8] = &::hex::decode($e).unwrap(); + VRFPublicKey::try_from(v).unwrap() + }}; + ($t:ty, $e:expr) => { + <$t>::try_from(::hex::decode($e).unwrap().as_ref()).unwrap() + }; +} + +#[allow(dead_code, non_snake_case)] +struct VRFTestVector { + SK: &'static str, + PK: &'static str, + alpha: &'static [u8], + x: &'static str, + H: &'static str, + k: &'static str, + U: &'static str, + V: &'static str, + pi: &'static str, + beta: &'static str, +} + +const TESTVECTORS : [VRFTestVector; 3] = [ + VRFTestVector { + SK : "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + PK : "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a", + alpha : b"", + x : "7c2cac12e69be96ae9065065462385e8fcff2768d980c0a3a520f006904de90f", + // try_and_increment succeeded on ctr = 0 + H : "5b2c80db3ce2d79cc85b1bfb269f02f915c5f0e222036dc82123f640205d0d24", + k : "647ac2b3ca3f6a77e4c4f4f79c6c4c8ce1f421a9baaa294b0adf0244915130f7067640acb6fd9e7e84f8bc30d4e03a95e410b82f96a5ada97080e0f187758d38", + U : "a21c342b8704853ad10928e3db3e58ede289c798e3cdfd485fbbb8c1b620604f", + V : "426fe41752f0b27439eb3d0c342cb645174a720cae2d4e9bb37de034eefe27ad", + pi : "9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a", + beta : "a64c292ec45f6b252828aff9a02a0fe88d2fcc7f5fc61bb328f03f4c6c0657a9d26efb23b87647ff54f71cd51a6fa4c4e31661d8f72b41ff00ac4d2eec2ea7b3", + }, + VRFTestVector { + SK : "4ccd089b28ff96da9db6c346ec114e0f5b8a319f35aba624da8cf6ed4fb8a6fb", + PK : "3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c", + alpha : b"\x72", + x : "c799d106d5927970e5989f5671131fa27e6c6b3b7f821c5e259a24b02e502e01", + // try_and_increment succeeded on ctr = 4 + H : 
"08e18a34f3923db32e80834fb8ced4e878037cd0459c63ddd66e5004258cf76c", + k : "627237308294a8b344a09ad893997c630153ee514cd292eddd577a9068e2a6f24cbee0038beb0b1ee5df8be08215e9fc74608e6f9358b0e8d6383b1742a70628", + U : "18b5e500cb34690ced061a0d6995e2722623c105221eb91b08d90bf0491cf979", + V : "87e1f47346c86dbbd2c03eafc7271caa1f5307000a36d1f71e26400955f1f627", + pi : "84a63e74eca8fdd64e9972dcda1c6f33d03ce3cd4d333fd6cc789db12b5a7b9d03f1cb6b2bf7cd81a2a20bacf6e1c04e59f2fa16d9119c73a45a97194b504fb9a5c8cf37f6da85e03368d6882e511008", + beta : "cddaa399bb9c56d3be15792e43a6742fb72b1d248a7f24fd5cc585b232c26c934711393b4d97284b2bcca588775b72dc0b0f4b5a195bc41f8d2b80b6981c784e", + }, + VRFTestVector { + SK : "c5aa8df43f9f837bedb7442f31dcb7b166d38535076f094b85ce3a2e0b4458f7", + PK : "fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025", + alpha : b"\xaf\x82", + x : "ef76bea4dae9a6cb6013cf2cbce0e2a8b94d7f4ec5c2f51b1325a181991ea90c", + // try_and_increment succeeded on ctr = 0 + H : "e4581824b70badf0e57af789dd8cf85513d4b9814566de0e3f738439becfba33", + k : "a950f736af2e3ae2dbcb76795f9cbd57c671eee64ab17069f945509cd6c4a74852fe1bbc331e1bd573038ec703ca28601d861ad1e9684ec89d57bc22986acb0e", + U : "5114dc4e741b7c4a28844bc585350240a51348a05f337b5fd75046d2c2423f7a", + V : "a6d5780c472dea1ace78795208aaa05473e501ed4f53da57e1fb13b7e80d7f59", + pi : "aca8ade9b7f03e2b149637629f95654c94fc9053c225ec21e5838f193af2b727b84ad849b0039ad38b41513fe5a66cdd2367737a84b488d62486bd2fb110b4801a46bfca770af98e059158ac563b690f", + beta : "d938b2012f2551b0e13a49568612effcbdca2aed5d1d3a13f47e180e01218916e049837bd246f66d5058e56d3413dbbbad964f5e9f160a81c9a1355dcd99b453", + }, +]; + +#[test] +fn gen_keypair_prove_and_verify() { + let message = b"Test message, hello, hetu!"; + let private_key = VRFPrivateKey::generate_keypair(&mut OsRng); + let public_key: VRFPublicKey = (&private_key).into(); + let proof = private_key.prove(message); + assert!(public_key.verify(&proof, message).is_ok()); + let random_num: Output 
= (&proof).into(); + println!("random output: {:?}", ::hex::encode(random_num.to_bytes())); +} + +#[test] +fn test_expand_secret_key() { + for tv in TESTVECTORS.iter() { + let sk = from_string!(VRFPrivateKey, tv.SK); + let esk = VRFExpandedPrivateKey::from(&sk); + let pk = VRFPublicKey::try_from(&sk).unwrap(); + assert_eq!(tv.PK, to_string!(pk)); + assert_eq!(tv.x, to_string!(esk.key)); + } +} + +#[test] +fn test_hash_to_curve() { + for tv in TESTVECTORS.iter() { + let pk = from_string!(VRFPublicKey, tv.PK); + let h_point = pk.hash_to_curve(&tv.alpha); + assert_eq!(tv.H, to_string!(h_point.compress())); + } +} + +#[test] +fn test_nonce_generation() { + for tv in TESTVECTORS.iter() { + let sk = VRFExpandedPrivateKey::from(&from_string!(VRFPrivateKey, tv.SK)); + let h_point = from_string!(CompressedEdwardsY, tv.H); + let k = nonce_generation_bytes(sk.nonce, h_point); + assert_eq!(tv.k, ::hex::encode(&k[..])); + } +} + +#[test] +fn test_hash_points() { + for tv in TESTVECTORS.iter() { + let sk = VRFExpandedPrivateKey::from(&from_string!(VRFPrivateKey, tv.SK)); + let h_point = from_string!(CompressedEdwardsY, tv.H); + let k_bytes = nonce_generation_bytes(sk.nonce, h_point); + let k_scalar = ed25519_Scalar::from_bytes_mod_order_wide(&k_bytes); + + let gamma = h_point * sk.key; + let u = ED25519_BASEPOINT_POINT * k_scalar; + let v = h_point * k_scalar; + + assert_eq!(tv.U, to_string!(u.compress())); + assert_eq!(tv.V, to_string!(v.compress())); + + let c_scalar = hash_points(&[h_point, gamma, u, v]); + + let s_scalar = k_scalar + c_scalar * sk.key; + let s_scalar = ed25519_Scalar::from_bytes_mod_order(s_scalar.to_bytes()); + + let pi = Proof::new(gamma, c_scalar, s_scalar); + + assert_eq!(tv.pi, to_string!(pi)); + } +} + +#[test] +fn test_prove() { + for tv in TESTVECTORS.iter() { + let sk = from_string!(VRFPrivateKey, tv.SK); + let pi = sk.prove(tv.alpha); + + assert_eq!(tv.pi, to_string!(pi)); + } +} + +#[test] +fn test_verify() { + for tv in TESTVECTORS.iter() { + 
assert!(from_string!(VRFPublicKey, tv.PK) + .verify(&from_string!(Proof, tv.pi), tv.alpha) + .is_ok()); + } +} + +#[test] +fn test_output_from_proof() { + for tv in TESTVECTORS.iter() { + assert_eq!( + tv.beta, + to_string!(Output::from( + &from_string!(VRFPrivateKey, tv.SK).prove(tv.alpha) + )) + ); + } +} + +proptest! { + #[test] + fn test_prove_and_verify( + hash1 in any::(), + hash2 in any::(), + keypair in uniform_keypair_strategy::() + ) { + let (pk, sk) = (&keypair.public_key, &keypair.private_key); + let pk_test = VRFPublicKey::try_from(sk).unwrap(); + prop_assert_eq!(pk, &pk_test); + let (input1, input2) = (hash1.as_ref(), hash2.as_ref()); + let proof1 = sk.prove(input1); + prop_assert!(pk.verify(&proof1, input1).is_ok()); + prop_assert!(pk.verify(&proof1, input2).is_err()); + } +} diff --git a/demos/README.md b/demos/README.md index 09f8ca6..410bc9d 100644 --- a/demos/README.md +++ b/demos/README.md @@ -25,6 +25,16 @@ Randomness serves a vital role in nearly every aspect of current society,the i #### VLC & VRF Proposal Randomness serves a vital role in nearly every aspect of current society,the idea is to intergrate the ablility of logical clocks into random generator. To generate verifiable, fair random numbers, the proposal integrates VRF. +## [tee_vlc](./tee_vlc/) + +This module verifiable logic clock is an implementation of Chronos's TEE backend. + +And some features as follow: + +* Use the aws nitro enclave as its trust execution environment. +* Support functions test and press test cases. + + ## [Test-Conflict](./test_conflict/) This use case domo is designed to detect software version conflict by applied vector clock. 
diff --git a/demos/coll-tx/Cargo.toml b/demos/coll_tx/Cargo.toml similarity index 93% rename from demos/coll-tx/Cargo.toml rename to demos/coll_tx/Cargo.toml index 9694360..7c4fb79 100644 --- a/demos/coll-tx/Cargo.toml +++ b/demos/coll_tx/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "coll-tx" +name = "coll_tx" version = "0.1.0" edition = "2021" diff --git a/demos/coll-tx/src/lib.rs b/demos/coll_tx/src/lib.rs similarity index 100% rename from demos/coll-tx/src/lib.rs rename to demos/coll_tx/src/lib.rs diff --git a/demos/coll-tx/src/simple_utxo.rs b/demos/coll_tx/src/simple_utxo.rs similarity index 99% rename from demos/coll-tx/src/simple_utxo.rs rename to demos/coll_tx/src/simple_utxo.rs index 558b1cb..f55715e 100644 --- a/demos/coll-tx/src/simple_utxo.rs +++ b/demos/coll_tx/src/simple_utxo.rs @@ -1,7 +1,7 @@ //! A simple utxo structure for testing case. use std::io; -use sha2::{Sha256, Digest}; +use sha2::Digest; /// SimpleUTXO input #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/demos/tee_vlc/Cargo.toml b/demos/tee_vlc/Cargo.toml new file mode 100644 index 0000000..44c62ac --- /dev/null +++ b/demos/tee_vlc/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "tee_vlc" +version = "0.1.0" +edition = "2021" + +[features] +ordinary = [ + "nitro-enclaves", + "reqwest", +] +nitro-enclaves = ["aws-nitro-enclaves-nsm-api", "aws-nitro-enclaves-attestation"] + + +[dependencies] +bincode = "1.3.3" +blake2 = "0.10.6" +bytes = "1.5.0" +derive_more = "0.99.17" +derive-where = "1.2.7" +tracing = "0.1.40" +tracing-subscriber = "0.3.18" +rand = "0.8.5" +vlc ={ path = "../../crates/vlc", version = "0.1.0"} +types ={ path = "../../crates/types", version = "0.1.0"} +crypto ={ path = "../../crates/crypto", version = "0.1.0"} +enclaves ={ path = "../../crates/enclaves", version = "0.1.0"} +serde = { version = "1.0.195", features = ["derive"] } +nix = { version = "0.28.0", features = ["socket", "sched", "resource"] } +tikv-jemallocator = { version = "0.5.4", optional = true } +tokio = { 
version = "1.35.1", features = ["net", "time", "sync", "rt", "signal", "macros", "rt-multi-thread", "fs", "process", "io-util"] } +tokio-util = "0.7.10" +anyhow = { version = "1.0.79", features = ["backtrace"] } +reqwest = { version = "0.12.4", features = ["json", "multipart"], optional = true } +aws-nitro-enclaves-nsm-api = { version = "0.4.0", optional = true } +aws-nitro-enclaves-attestation = { git = "https://github.com/neatsys/aws-nitro-enclaves-attestation", version = "0.1.0", optional = true } diff --git a/demos/tee_vlc/README.md b/demos/tee_vlc/README.md new file mode 100644 index 0000000..b55b5ba --- /dev/null +++ b/demos/tee_vlc/README.md @@ -0,0 +1,46 @@ +# VLC In TEE + +This module is the verifiable logical clock implementation of Chronos's TEE backend. + +## Prepare environment + +Now, this repository uses the AWS Nitro Enclave as its trusted execution environment. + +So, please create a cloud virtual instance and choose `Amazon-2023 linux` as the base image. +This base operating system is more friendly for using the AWS Nitro Enclave. + +### Prepare Env & Configuration + +1. Prepare Env & install dependency tools +```sh +sudo dnf upgrade +sudo dnf install -y tmux htop openssl-devel perl docker-24.0.5-1.amzn2023.0.3 aws-nitro-enclaves-cli aws-nitro-enclaves-cli-devel +``` + +2. Configuration + +Please `cat /etc/nitro_enclaves/allocator.yaml` and set cpu_count & memory_mib. For tee_vlc: just `2 core + 1024 M` is enough, for tee_llm: `4 core + 16384 M` at least. Update the file and save it. + +3. Run `init_env.sh` + +```sh +cd scripts +sudo chmod +x init_env.sh +./init_env.sh +``` +Remember to re-run the script whenever you update `/etc/nitro_enclaves/allocator.yaml`. + + +## Run VLC TEE Images + +```bash +cd image +cargo run --bin run-solo-vlc-enclave -- . 
--features nitro-enclaves +``` + +## Testing + +```bash +cargo run --bin call_vlc_client --features nitro-enclaves +``` + diff --git a/demos/tee_vlc/image/Dockerfile b/demos/tee_vlc/image/Dockerfile new file mode 100644 index 0000000..611d75a --- /dev/null +++ b/demos/tee_vlc/image/Dockerfile @@ -0,0 +1,5 @@ +FROM alpine:latest + +COPY tee_vlc . + +CMD ./tee_vlc \ No newline at end of file diff --git a/demos/tee_vlc/image/run.sh b/demos/tee_vlc/image/run.sh new file mode 100644 index 0000000..ef5f530 --- /dev/null +++ b/demos/tee_vlc/image/run.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +if [ "$1" = "debug" ]; then + nitro-cli run-enclave --cpu-count 2 --memory 2048 --enclave-cid 16 --eif-path app.eif --attach-console +else + nitro-cli run-enclave --cpu-count 2 --memory 2048 --enclave-cid 16 --eif-path app.eif +fi \ No newline at end of file diff --git a/demos/tee_vlc/src/bin/call_vlc_client.rs b/demos/tee_vlc/src/bin/call_vlc_client.rs new file mode 100644 index 0000000..1f585c4 --- /dev/null +++ b/demos/tee_vlc/src/bin/call_vlc_client.rs @@ -0,0 +1,194 @@ +use std::{ + env, + fmt::Write, + future::pending, + time::Duration, +}; + +use vlc::ordinary_clock::OrdinaryClock; +use tee_vlc::nitro_clock::{nitro_enclaves_portal_session, NitroEnclavesClock, Update, UpdateOk}; +use tokio::{ + sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + time::{sleep, timeout, Instant}, +}; + +// tee id +const CID: u32 = 16; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> anyhow::Result<()> { + let args: Vec = env::args().collect(); + + let num_concurrent = if args.len() > 1 { + args[1].parse::().ok() + } else { + None + }; + + let run_nitro_client = { + let (update_sender, update_receiver) = unbounded_channel(); + let (update_ok_sender, mut update_ok_receiver) = unbounded_channel::>(); + tokio::spawn({ + let update_sender = update_sender.clone(); + async move { + pending::<()>().await; + drop(update_sender) + } + }); + ( + 
tokio::spawn(nitro_enclaves_portal_session( + CID, + 5006, + update_receiver, + update_ok_sender, + )), + tokio::spawn(async move { + let verify = |clock: NitroEnclavesClock| { + let document = clock.verify()?; + anyhow::ensure!(document.is_some()); + Ok(()) + }; + let mut lines = String::new(); + if let Some(num_concurrent) = num_concurrent { + for size in (0..=12).step_by(2).map(|n| 1 << n) { + stress_bench_session( + size, + 0, + num_concurrent, + &update_sender, + &mut update_ok_receiver, + &mut lines, + ) + .await?; + } + // println!("{lines}") + } else { + println!("key, num_merged, deserialize_tee, verify_proof_tee, update_clock_tee, gen_clock_proof_tee, total_in_tee, net_round"); + for size in (0..=16).step_by(2).map(|n| 1 << n) { + bench_session( + size, + 0, + &update_sender, + &mut update_ok_receiver, + verify, + &mut lines, + ) + .await? + } + for num_merged in 0..=15 { + bench_session( + 1 << 10, + num_merged, + &update_sender, + &mut update_ok_receiver, + verify, + &mut lines, + ) + .await? + } + println!("{lines}") + } + + anyhow::Ok(()) + }), + ) + }; + + let (portal_session, session) = run_nitro_client; + 'select: { + tokio::select! 
{ + result = session => break 'select result??, + result = portal_session => result??, + } + anyhow::bail!("unreachable") + } + Ok(()) +} + +async fn bench_session + Clone + Send + Sync + 'static>( + size: usize, + num_merged: usize, + update_sender: &UnboundedSender>, + update_ok_receiver: &mut UnboundedReceiver>, + verify: impl Fn(C) -> anyhow::Result<()>, + lines: &mut String, +) -> anyhow::Result<()> +where + C::Error: Into, +{ + let clock = + C::try_from(OrdinaryClock((0..size).map(|i| (i as _, 0)).collect())).map_err(Into::into)?; + let start = Instant::now(); + update_sender.send(Update(clock, Default::default(), 0))?; + let Some((_, clock, elapsed)) = update_ok_receiver.recv().await else { + anyhow::bail!("missing UpdateOk") + }; + let net_round = start.elapsed(); + println!( + "{size}, {num_merged}, {:?}, {:?}, {:?}, {:?}, {:?}, {:?}", + elapsed[0], elapsed[1], elapsed[2], elapsed[3], elapsed[4], net_round + ); + + for _ in 0..5 { + sleep(Duration::from_millis(100)).await; + let update = Update(clock.clone(), vec![clock.clone(); num_merged], 0); + let start = Instant::now(); + update_sender.send(update)?; + let Some((_, clock, elapsed_in_tee)) = update_ok_receiver.recv().await else { + anyhow::bail!("missing UpdateOk") + }; + let elapsed = start.elapsed(); + // eprintln!("{size:8} {num_merged:3} {elapsed:?}"); + println!( + "{size}, {num_merged}, {:?}, {:?}, {:?}, {:?}, {:?}, {:?}", + elapsed_in_tee[0], elapsed_in_tee[1], elapsed_in_tee[2], elapsed_in_tee[3], elapsed_in_tee[4], elapsed + ); + writeln!(lines, "{size},{num_merged},{}ms", elapsed.as_millis())?; + verify(clock)? 
+ } + Ok(()) +} + +async fn stress_bench_session + Clone + Send + Sync + 'static>( + size: usize, + num_merged: usize, + num_concurrent: usize, + update_sender: &UnboundedSender>, + update_ok_receiver: &mut UnboundedReceiver>, + lines: &mut String, +) -> anyhow::Result<()> +where + C::Error: Into, +{ + let clock = + C::try_from(OrdinaryClock((0..size).map(|i| (i as _, 0)).collect())).map_err(Into::into)?; + for i in 0..num_concurrent { + update_sender.send(Update(clock.clone(), Default::default(), i as _))?; + } + let mut count = 0; + let close_loops_session = async { + while let Some((id, clock, _elapsed)) = update_ok_receiver.recv().await { + count += 1; + let update = Update(clock.clone(), vec![clock.clone(); num_merged], id); + update_sender.send(update)? + } + anyhow::Ok(()) + }; + match timeout(Duration::from_secs(10), close_loops_session).await { + Err(_) => {} + Ok(result) => { + result?; + anyhow::bail!("unreachable") + } + } + println!( + "key {size},merged {num_merged},counts {count}, tps {}", + count as f32 / 10. + ); + writeln!( + lines, + "{size},{num_merged},{count},{}", + count as f32 / 10. 
+ )?; + Ok(()) +} diff --git a/demos/tee_vlc/src/bin/run-solo-vlc-enclave.rs b/demos/tee_vlc/src/bin/run-solo-vlc-enclave.rs new file mode 100644 index 0000000..5f4b698 --- /dev/null +++ b/demos/tee_vlc/src/bin/run-solo-vlc-enclave.rs @@ -0,0 +1,71 @@ +use tokio::process::Command; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> anyhow::Result<()> { + // the path of dockerfile and app.eif putted path + let item = std::env::args().nth(1); + println!("* Install Nitro CLI"); + let status = Command::new("sh") + .arg("-c") + .arg( + String::from("sudo dnf install -y tmux htop openssl-devel perl docker-24.0.5-1.amzn2023.0.3 aws-nitro-enclaves-cli aws-nitro-enclaves-cli-devel") + + " && sudo usermod -aG ne ec2-user" + + " && sudo usermod -aG docker ec2-user" + + " && sudo systemctl restart docker" + + " && sudo systemctl restart nitro-enclaves-allocator.service" + + " && sudo systemctl enable --now nitro-enclaves-allocator.service" + + " && sudo systemctl enable --now docker" + ) + .status() + .await?; + anyhow::ensure!(status.success()); + + println!("* Build artifact"); + let status = Command::new("cargo") + .args([ + "build", + "--target", + "x86_64-unknown-linux-musl", + "--profile", + "artifact", + "--features", + "nitro-enclaves,tikv-jemallocator", + "--bin", + "tee_vlc", + ]) + .status() + .await?; + anyhow::ensure!(status.success()); + + println!("* cp artifact"); + let status = Command::new("cp") + .arg("target/x86_64-unknown-linux-musl/artifact/tee_vlc") + .arg(item.clone().ok_or(anyhow::format_err!("missing destination path"))?) + .status() + .await?; + anyhow::ensure!(status.success()); + + println!("* cd docker folder and build enclave image file"); + let status = Command::new("sh") + .arg("-c") + .arg(format!( + "cd {} && docker build . -t tee_vlc && nitro-cli build-enclave --docker-uri tee_vlc:latest --output-file tee_vlc.eif", + item.clone().ok_or(anyhow::format_err!("missing destination path"))? 
+ )) + .status() + .await?; + anyhow::ensure!(status.success()); + + println!("* cd dockerfile folder and run enclave image"); + let status = Command::new("sh") + .arg("-c") + .arg(format!( + "cd {} && nitro-cli run-enclave --cpu-count 2 --memory 2048 --enclave-cid 16 --eif-path tee_vlc.eif", + item.ok_or(anyhow::format_err!("missing destination path"))? + )) + .status() + .await?; + anyhow::ensure!(status.success()); + + Ok(()) +} diff --git a/demos/tee_vlc/src/lib.rs b/demos/tee_vlc/src/lib.rs new file mode 100644 index 0000000..011a655 --- /dev/null +++ b/demos/tee_vlc/src/lib.rs @@ -0,0 +1,13 @@ +pub mod nitro_clock; + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Clocked { + pub clock: C, + pub inner: M, +} + +pub trait Verify: Send + Sync + 'static { + fn verify_clock(&self, num_faulty: usize, state: &S) -> anyhow::Result<()>; +} \ No newline at end of file diff --git a/demos/tee_vlc/src/main.rs b/demos/tee_vlc/src/main.rs new file mode 100644 index 0000000..d4700ff --- /dev/null +++ b/demos/tee_vlc/src/main.rs @@ -0,0 +1,7 @@ +use tee_vlc::nitro_clock::NitroEnclavesClock; + +#[tokio::main] +#[cfg(feature = "nitro-enclaves")] +async fn main() -> anyhow::Result<()> { + NitroEnclavesClock::run(5006).await +} \ No newline at end of file diff --git a/demos/tee_vlc/src/nitro_clock.rs b/demos/tee_vlc/src/nitro_clock.rs new file mode 100644 index 0000000..e3938e4 --- /dev/null +++ b/demos/tee_vlc/src/nitro_clock.rs @@ -0,0 +1,282 @@ +use std::{sync::Arc, time::Duration}; +use bincode::Options; +use types::raw_wrapper::Payload; +use crypto::core::DigestHash; +use enclaves::nitro_secure::{HandleFn, NitroSecureModule as NitroSecure}; +use vlc::ordinary_clock::{Clock, LamportClock, OrdinaryClock}; +use derive_where::derive_where; +use serde::{Deserialize, Serialize}; +use tokio::{sync::mpsc::{UnboundedReceiver, UnboundedSender}, time::Instant}; +use tracing::*; + +#[derive(Debug, Serialize, Deserialize)] +pub struct 
Update(pub C, pub Vec, pub u64); + +// feel lazy to define event type for replying +pub type UpdateOk = (u64, C, Vec); + +#[derive(Debug, Clone, Default, derive_more::AsRef, Serialize, Deserialize)] +#[derive_where(PartialOrd, PartialEq)] +pub struct NitroEnclavesClock { + #[as_ref] + pub plain: OrdinaryClock, + #[derive_where(skip)] + pub document: Payload, +} + +impl TryFrom for NitroEnclavesClock { + type Error = anyhow::Error; + + fn try_from(value: OrdinaryClock) -> Result { + anyhow::ensure!(value.is_genesis(), "OrdinaryClock is not in genesis state"); + Ok(Self { + plain: value, + document: Default::default(), + }) + } +} + +impl Clock for NitroEnclavesClock { + fn reduce(&self) -> LamportClock { + self.plain.reduce() + } +} + +// technically `feature = "aws-nitro-enclaves-attestation"` is sufficient for +// attestation, NSM API is only depended by `NitroSecureModule` that running +// inside enclaves image +#[cfg(feature = "nitro-enclaves")] +impl NitroEnclavesClock { + pub fn verify( + &self, + ) -> anyhow::Result> { + if self.plain.is_genesis() { + return Ok(None); + } + use aws_nitro_enclaves_attestation::{AttestationProcess as _, AWS_ROOT_CERT}; + use aws_nitro_enclaves_nsm_api::api::AttestationDoc; + let document = AttestationDoc::from_bytes( + &self.document, + AWS_ROOT_CERT, + std::time::SystemTime::UNIX_EPOCH + .elapsed() + .unwrap() + .as_secs(), + )?; + use DigestHash as _; + anyhow::ensure!( + document.user_data.as_ref().map(|user_data| &***user_data) + == Some(&self.plain.sha256().to_fixed_bytes()[..]) + ); + Ok(Some(document)) + } + + pub fn worker() -> HandleFn { + Arc::new(|buf, nsm, pcrs, write_sender| { + Box::pin(async move { + // IO action in tee is severe delay, just debug + // println!("Received buffer: {:?}", buf); + // let _ = io::stdout().flush(); + + // if production env, need to remove time slot log + let mut timers = Vec::new(); + if let Err(err) = async { + // 0. once action time + let full_start = Instant::now(); + + // 1. 
decode time + let start = Instant::now(); + let Update(prev, merged, id) = bincode::options() + .deserialize::>(&buf)?; + + let elapsed = start.elapsed(); + timers.push(elapsed); + + // 2. verify clocks time + let start = Instant::now(); + for clock in [&prev].into_iter().chain(&merged) { + if let Some(document) = clock.verify()? { + for (i, pcr) in pcrs.iter().enumerate() { + anyhow::ensure!( + document.pcrs.get(&i).map(|pcr| &**pcr) == Some(pcr), + "PCR value mismatch at index {}", i + ) + } + } + } + + let elapsed = start.elapsed(); + timers.push(elapsed); + + // 3. update clock time + let start = Instant::now(); + let plain = prev + .plain + .update(merged.iter().map(|clock| &clock.plain), id); + + let elapsed = start.elapsed(); + timers.push(elapsed); + + // 4. gen clock with proof time + let start = Instant::now(); + // relies on the fact that different clocks always hash into different + // digests, hopefully true + let user_data = plain.sha256().to_fixed_bytes().to_vec(); + let document = nsm.process_attestation(user_data)?; + let updated = NitroEnclavesClock { + plain, + document: Payload(document), + }; + + let elapsed = start.elapsed(); + timers.push(elapsed); + + let elapsed = full_start.elapsed(); + timers.push(elapsed); + + let buf = bincode::options().serialize(&(id, updated, timers))?; + write_sender.send(buf)?; + Ok(()) + } + .await + { + warn!("{err}") + } + Ok(()) + }) + }) + } + + pub async fn run(port: u32) -> anyhow::Result<()> { + let handler: HandleFn = NitroEnclavesClock::worker(); + + NitroSecure::run(port, handler).await + } +} + + +pub async fn nitro_enclaves_portal_session( + cid: u32, + port: u32, + mut events: UnboundedReceiver>, + sender: UnboundedSender>, +) -> anyhow::Result<()> { + use std::os::fd::AsRawFd; + + use bincode::Options; + use nix::sys::socket::{connect, socket, AddressFamily, SockFlag, SockType, VsockAddr}; + use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; + + let fd = socket( + AddressFamily::Vsock, + 
SockType::Stream, + SockFlag::empty(), + None, + )?; + // this one is blocking, but should be instant, hopefully + { + let _span = tracing::debug_span!("connect").entered(); + connect(fd.as_raw_fd(), &VsockAddr::new(cid, port))? + } + let stream = std::os::unix::net::UnixStream::from(fd); + stream.set_nonblocking(true)?; + let stream = tokio::net::UnixStream::from_std(stream)?; + let (mut read_half, mut write_half) = stream.into_split(); + let write_session = tokio::spawn(async move { + while let Some(update) = events.recv().await { + let buf = bincode::options().serialize(&update)?; + write_half.write_u64_le(buf.len() as _).await?; + write_half.write_all(&buf).await? + } + anyhow::Ok(()) + }); + let read_session = tokio::spawn(async move { + loop { + let len = read_half.read_u64_le().await?; + let mut buf = vec![0; len as _]; + read_half.read_exact(&mut buf).await?; + sender.send(bincode::options().deserialize(&buf)?)? + } + #[allow(unreachable_code)] // for type hinting + anyhow::Ok(()) + }); + tokio::select! { + result = write_session => return result?, + result = read_session => result?? + } + anyhow::bail!("unreachable") +} + +#[cfg(feature = "nitro-enclaves")] +pub mod impls { + + use super::NitroEnclavesClock; + use crate::{Clocked, Verify}; + + impl Verify<()> for Clocked { + fn verify_clock(&self, _: usize, (): &()) -> anyhow::Result<()> { + self.clock.verify()?; + Ok(()) + } + } +} + +pub fn try_connection(cid: u32, port: u32) -> anyhow::Result { + use nix::sys::socket::{connect, socket, AddressFamily, SockFlag, SockType, VsockAddr}; + use std::os::fd::AsRawFd; + + let fd = socket( + AddressFamily::Vsock, + SockType::Stream, + SockFlag::empty(), + None, + )?; + + { + let _span = tracing::debug_span!("connect").entered(); + connect(fd.as_raw_fd(), &VsockAddr::new(cid, port))? 
+ } + + let stream = std::os::unix::net::UnixStream::from(fd); + stream.set_nonblocking(true)?; + + let stream = tokio::net::UnixStream::from_std(stream)?; + Ok(stream) +} + +pub async fn tee_start_listening( + stream: tokio::net::UnixStream, + mut events: UnboundedReceiver>, + sender: UnboundedSender>, +) -> anyhow::Result<()> { + use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _}; + + let (mut read_half, mut write_half) = stream.into_split(); + + let write_session = tokio::spawn(async move { + while let Some(prompt) = events.recv().await { + let buf = bincode::options().serialize(&prompt)?; + write_half.write_u64_le(buf.len() as _).await?; + write_half.write_all(&buf).await?; + } + anyhow::Ok(()) + }); + + let read_session = tokio::spawn(async move { + loop { + let len = read_half.read_u64_le().await?; + let mut buf = vec![0; len as _]; + read_half.read_exact(&mut buf).await?; + sender.send(bincode::options().deserialize(&buf)?)? + } + #[allow(unreachable_code)] // for type hinting + anyhow::Ok(()) + }); + + tokio::select! { + result = write_session => return result?, + result = read_session => result?? 
+ } + + anyhow::bail!("unreachable") +} diff --git a/demos/test_conflict/src/lib.rs b/demos/test_conflict/src/lib.rs index ca3f9b1..1b032ca 100644 --- a/demos/test_conflict/src/lib.rs +++ b/demos/test_conflict/src/lib.rs @@ -1,26 +1,33 @@ use std::collections::HashMap; #[derive(Debug)] -struct VectorClock { +struct _VectorClock { clock: HashMap, } -impl VectorClock { - fn new() -> Self { - VectorClock { +impl _VectorClock { + fn _new() -> Self { + _VectorClock { clock: HashMap::new(), } } - fn get_version(&self, node_id: &str) -> &i64 { + fn _get_version(&self, node_id: &str) -> &i64 { self.clock.get(node_id).unwrap_or(&0) } - fn update_version(&mut self, node_id: &str, version: i64) { - self.clock.insert(node_id.parse().unwrap(), version); + fn _update_version(&mut self, node_id: &str, version: i64) { + match node_id.parse() { + Ok(node) => { + self.clock.insert(node, version); + } + Err(e) => { + eprintln!("Invalid node ID: {}", e); + } + } } - fn has_conflict(&self, other: &VectorClock,) -> bool { + fn _has_conflict(&self, other: &_VectorClock,) -> bool { let mut all_greater = true; let mut all_smaller = true; @@ -44,15 +51,15 @@ mod tests { #[test] fn test_conflict() { - let mut clock1 = VectorClock::new(); - clock1.update_version("A".to_string().as_str(), 1); - clock1.update_version("B".to_string().as_str(), 2); + let mut clock1 = _VectorClock::_new(); + clock1._update_version("A", 1); + clock1._update_version("B", 2); - let mut clock2 = VectorClock::new(); - clock2.update_version("A".to_string().as_str(), 2); - clock2.update_version("B".to_string().as_str(), 1); + let mut clock2 = _VectorClock::_new(); + clock2._update_version("A", 2); + clock2._update_version("B", 1); - assert!(clock1.has_conflict(&clock2)); + assert!(clock1._has_conflict(&clock2)); } } diff --git a/demos/vlc-dag/Cargo.toml b/demos/vlc_dag/Cargo.toml similarity index 96% rename from demos/vlc-dag/Cargo.toml rename to demos/vlc_dag/Cargo.toml index 14d895f..326327a 100644 --- 
a/demos/vlc-dag/Cargo.toml +++ b/demos/vlc_dag/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "vlc-dag" +name = "vlc_dag" version = "0.1.0" edition = "2021" diff --git a/demos/vlc-dag/src/db_client/lldb_client.rs b/demos/vlc_dag/src/db_client/lldb_client.rs similarity index 89% rename from demos/vlc-dag/src/db_client/lldb_client.rs rename to demos/vlc_dag/src/db_client/lldb_client.rs index 168891c..4fe8cd8 100644 --- a/demos/vlc-dag/src/db_client/lldb_client.rs +++ b/demos/vlc_dag/src/db_client/lldb_client.rs @@ -5,19 +5,19 @@ pub struct VLCLLDb { env: Environment, merge_log: DbHandle, clock_infos: DbHandle, - cur_count: DbHandle, + _cur_count: DbHandle, } impl VLCLLDb { pub fn new(path: &str, mode: Option) -> Self { - let mut USER_DIR_MODE: u32 = 0o777; + let mut user_dir_mode: u32 = 0o777; if let Some(mode) = mode { - USER_DIR_MODE = mode; + user_dir_mode = mode; } let env = Environment::new() .max_dbs(5) - .open(path, USER_DIR_MODE) + .open(path, user_dir_mode) .expect("Failed to open the environment"); let merge_log = env @@ -32,7 +32,7 @@ impl VLCLLDb { .create_db("cur_count", DbFlags::empty()) .expect("Failed to create the cur_count database"); - VLCLLDb { env, merge_log, clock_infos, cur_count } + VLCLLDb { env, merge_log, clock_infos, _cur_count: cur_count } } pub(crate) fn add_clock_infos(&mut self, key: String, clock_info: ClockInfo) { diff --git a/demos/vlc-dag/src/db_client/lldb_test.rs b/demos/vlc_dag/src/db_client/lldb_test.rs similarity index 84% rename from demos/vlc-dag/src/db_client/lldb_test.rs rename to demos/vlc_dag/src/db_client/lldb_test.rs index 423c7cd..c0fd544 100644 --- a/demos/vlc-dag/src/db_client/lldb_test.rs +++ b/demos/vlc_dag/src/db_client/lldb_test.rs @@ -10,14 +10,14 @@ struct Commit { message: String, } -struct Repository { +struct _Repository { env: Environment, commits_db: DbHandle, } -impl Repository { +impl _Repository { - fn new(path: &str, db: &str) -> Self { + fn _new(path: &str, db: &str) -> Self { const USER_DIR: u32 = 
0o777; let env = Environment::new() .max_dbs(5) @@ -28,10 +28,10 @@ impl Repository { .create_db(db, DbFlags::empty()) .expect("Failed to create the commits database"); - Repository { env, commits_db } + _Repository { env, commits_db } } - fn add_commit(&mut self, commit: &Commit) { + fn _add_commit(&mut self, commit: &Commit) { // let db = self.env.get_default_db(DbFlags::empty()).unwrap(); let txn = self.env.new_transaction().unwrap(); let db = txn.bind(&self.commits_db); @@ -50,28 +50,28 @@ mod tests { #[test] fn submit_to_repo() { - let mut repo = Repository::new("./db", "commit"); + let mut repo = _Repository::_new("./db", "commit"); let commit1 = Commit { id: "commit1".to_string(), parent_ids: HashSet::new(), message: "Initial commit".to_string(), }; - repo.add_commit(&commit1); + repo._add_commit(&commit1); let commit2 = Commit { id: "commit2".to_string(), parent_ids: vec!["commit1".to_string()].into_iter().collect(), message: "Add feature X".to_string(), }; - repo.add_commit(&commit2); + repo._add_commit(&commit2); let commit3 = Commit { id: "commit3".to_string(), parent_ids: vec!["commit1".to_string()].into_iter().collect(), message: "Add feature Y".to_string(), }; - repo.add_commit(&commit3); + repo._add_commit(&commit3); let commit4 = Commit { id: "commit4".to_string(), @@ -80,6 +80,6 @@ mod tests { .collect(), message: "Merge feature X and Y".to_string(), }; - repo.add_commit(&commit4); + repo._add_commit(&commit4); } } \ No newline at end of file diff --git a/demos/vlc-dag/src/db_client/mod.rs b/demos/vlc_dag/src/db_client/mod.rs similarity index 100% rename from demos/vlc-dag/src/db_client/mod.rs rename to demos/vlc_dag/src/db_client/mod.rs diff --git a/demos/vlc-dag/src/lib.rs b/demos/vlc_dag/src/lib.rs similarity index 97% rename from demos/vlc-dag/src/lib.rs rename to demos/vlc_dag/src/lib.rs index 557f534..1616a74 100644 --- a/demos/vlc-dag/src/lib.rs +++ b/demos/vlc_dag/src/lib.rs @@ -10,19 +10,16 @@ pub mod db_client; -use clap::builder::Str; 
+// use clap::builder::Str; use db_client::lldb_client::VLCLLDb; use serde::{Deserialize, Serialize}; use std::time::UNIX_EPOCH; use std::{cmp, time::SystemTime}; -use std::collections::{BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeSet, HashMap}; use std::io::BufRead; use std::sync::{Arc, RwLock}; use std::net::SocketAddr; use tokio::net::UdpSocket; - -use std::time; -use tokio::{task::JoinHandle}; use vlc::Clock; use lmdb_rs as lmdb; @@ -222,7 +219,7 @@ impl ServerState { } } - fn handle_diff_req(&mut self, msg: DiffReq, db: Arc>) -> Option { + fn handle_diff_req(&mut self, msg: DiffReq, _db: Arc>) -> Option { // println!("Key-To-messageid: {:?}", self.clock_to_eventid); if msg.from_clock.count == 0 { let req = ServerMessage::DiffRsp(DiffRsp { diffs: self.items.clone(), from:self.clock_info.clone(), to: msg.from_clock.id }); @@ -239,7 +236,7 @@ impl ServerState { } } - fn handle_diff_rsp(&mut self, msg: DiffRsp, db: Arc>) -> bool { + fn handle_diff_rsp(&mut self, msg: DiffRsp, _db: Arc>) -> bool { match self.clock_info.clock.partial_cmp(&msg.from.clock) { Some(cmp::Ordering::Equal) => {}, Some(cmp::Ordering::Greater) => {}, @@ -262,7 +259,7 @@ impl ServerState { false } - fn handle_active_sync(&mut self, msg: ActiveSync, db: Arc>) -> (Option, bool){ + fn handle_active_sync(&mut self, msg: ActiveSync, _db: Arc>) -> (Option, bool){ match self.clock_info.clock.partial_cmp(&msg.latest.clock) { Some(cmp::Ordering::Equal) => return (None, false), Some(cmp::Ordering::Greater) => return (None, false), @@ -411,7 +408,6 @@ impl Server { self.sinker_merge_log(&msg.latest).await; } } - _ => { println!("[broadcast_state]: not support ServerMessage ")} } } Message::Terminate => { @@ -462,22 +458,22 @@ impl Server { /// direct send to someone node async fn direct_send(&mut self, fmsg: ServerMessage) { - let mut index = 0; + let mut _index = 0; let server_msg = Message::FromServer(fmsg.clone()); match fmsg.clone() { ServerMessage::DiffReq(msg) => { - index = msg.to; 
+ _index = msg.to; } ServerMessage::DiffRsp(msg) => { - index = msg.to; + _index = msg.to; } ServerMessage::ActiveSync(msg) => { - index = msg.to; + _index = msg.to; } _ => { return } } - let msg_index: usize = index.try_into().unwrap(); + let msg_index: usize = _index.try_into().unwrap(); if self.index != msg_index { self.socket .send_to( @@ -549,9 +545,10 @@ fn get_suffix(vec: &[T], target: T) -> Option> { #[cfg(test)] mod tests { - use rand::Rng; - use super::*; + use rand::Rng; + use std::time; + use tokio::task::JoinHandle; async fn start_servers(n_server: usize) -> (Configuration, Vec>>) { let mut config = Configuration {