From 94fcfb731a11de92bcdfee0f344a0bb5602f82d5 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 17:27:59 +0200 Subject: [PATCH 01/18] Increase log level of handler errors to trace-level --- aquadoggo/src/network/peers/handler.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aquadoggo/src/network/peers/handler.rs b/aquadoggo/src/network/peers/handler.rs index 43cf8de2f..468652463 100644 --- a/aquadoggo/src/network/peers/handler.rs +++ b/aquadoggo/src/network/peers/handler.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream as NegotiatedStream, SubstreamProtocol, }; -use log::warn; +use log::trace; use thiserror::Error; use crate::network::peers::{Codec, CodecError, PeerMessage, Protocol}; @@ -294,7 +294,7 @@ impl ConnectionHandler for Handler { )); } Poll::Ready(Some(Err(err))) => { - warn!("Error decoding inbound message: {err}"); + trace!("Error decoding inbound message: {err}"); // Close this side of the stream. If the peer is still around, they // will re-establish their connection @@ -320,7 +320,7 @@ impl ConnectionHandler for Handler { // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. - warn!("Error during closing inbound connection: {err}") + trace!("Error during closing inbound connection: {err}") } self.inbound_substream = None; break; @@ -373,14 +373,14 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(err) => { - warn!("Error sending outbound message: {err}"); + trace!("Error sending outbound message: {err}"); self.outbound_substream = None; break; } } } Poll::Ready(Err(err)) => { - warn!("Error encoding outbound message: {err}"); + trace!("Error encoding outbound message: {err}"); self.outbound_substream = None; break; } @@ -399,7 +399,7 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(err)) => { - warn!("Error flushing outbound message: {err}"); + trace!("Error flushing outbound message: {err}"); self.outbound_substream = None; break; } From 466869858268f79ed73ba24ce03ca0505a2d7cf8 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 17:35:44 +0200 Subject: [PATCH 02/18] Convert to ip addresses when logging multiaddr on info level --- aquadoggo/src/network/service.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index aad66aa6f..d4df73d94 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -22,6 +22,8 @@ use crate::network::behaviour::{Event, P2pandaBehaviour}; use crate::network::config::NODE_NAMESPACE; use crate::network::{identity, peers, swarm, utils, ShutdownHandler}; +const RELAY_CONNECT_TIMEOUT: Duration = Duration::from_secs(5); + /// Network service which handles all networking logic for a p2panda node. /// /// This includes: @@ -92,11 +94,14 @@ pub async fn network_service( swarm.behaviour_mut().peers.disable(); for mut relay_address in network_config.relay_addresses.clone() { + if let Some(address) = utils::to_quic_address(&relay_address) { + info!("Connecting to relay node {}", address); + } + // Attempt to connect to the relay node, we give this a 5 second timeout so as not to // get stuck if one relay is unreachable. 
- info!("Connecting to relay node at: {relay_address}"); if let Ok(result) = tokio::time::timeout( - Duration::from_secs(5), + RELAY_CONNECT_TIMEOUT, connect_to_relay(&mut swarm, &mut relay_address), ) .await @@ -157,10 +162,14 @@ pub async fn network_service( // Dial all nodes we want to directly connect to. for direct_node_address in &network_config.direct_node_addresses { - info!("Connecting to node at: {direct_node_address}"); + if let Some(address) = utils::to_quic_address(&direct_node_address) { + info!("Connecting to node @ {}", address); + } + let opts = DialOpts::unknown_peer_id() .address(direct_node_address.clone()) .build(); + match swarm.dial(opts) { Ok(_) => (), Err(err) => debug!("Error dialing node: {:?}", err), @@ -207,7 +216,7 @@ pub async fn connect_to_relay( info: identify::Info { observed_addr, .. }, peer_id, })) => { - info!("Relay told us our public address: {:?}", observed_addr); + debug!("Relay told us our public address: {:?}", observed_addr); // Add the newly learned address to our external addresses. swarm.add_external_address(observed_addr); @@ -441,13 +450,13 @@ impl EventLoop { rendezvous_node, .. } => { - info!("Discovered peers registered at rendezvous: {registrations:?}",); + debug!("Discovered peers registered at rendezvous: {registrations:?}",); for registration in registrations { for address in registration.record.addresses() { let peer_id = registration.record.peer_id(); if peer_id != self.local_peer_id { - info!("Add new peer to address book: {} {}", peer_id, address); + debug!("Add new peer to address book: {} {}", peer_id, address); if let Some(relay_address) = self.relay_addresses.get(rendezvous_node) { let peer_circuit_address = relay_address @@ -476,7 +485,7 @@ impl EventLoop { info: identify::Info { observed_addr, .. }, .. } => { - info!("Observed external address reported: {observed_addr}"); + debug!("Observed external address reported: {observed_addr}"); if !self .swarm .external_addresses() From b032092e25aa1639325296e4e6dbca473a581183 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 17:42:44 +0200 Subject: [PATCH 03/18] Make clippy happy --- aquadoggo/src/network/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquadoggo/src/network/service.rs b/aquadoggo/src/network/service.rs index d4df73d94..200523262 100644 --- a/aquadoggo/src/network/service.rs +++ b/aquadoggo/src/network/service.rs @@ -162,7 +162,7 @@ pub async fn network_service( // Dial all nodes we want to directly connect to. 
for direct_node_address in &network_config.direct_node_addresses { - if let Some(address) = utils::to_quic_address(&direct_node_address) { + if let Some(address) = utils::to_quic_address(direct_node_address) { info!("Connecting to node @ {}", address); } From a462e971bc393c951be67b4f994ee0fcd10d7387 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 18:29:48 +0200 Subject: [PATCH 04/18] Add CI to compile and publish releases --- .github/workflows/release.yml | 44 +++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/release.yml diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..7fc27b3c2 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,44 @@ +name: compile and release binaries + +permissions: + contents: write + +on: + push: + tags: + - v[0-9]+.[0-9]+.[0-9]+ + +jobs: + create-release: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: taiki-e/create-gh-release-action@v1 + with: + changelog: CHANGELOG.md + token: ${{ secrets.GITHUB_TOKEN }} + + upload-assets: + strategy: + matrix: + include: + - target: aarch64-unknown-linux-gnu + os: ubuntu-latest + - target: aarch64-apple-darwin + os: macos-latest + - target: x86_64-unknown-linux-gnu + os: ubuntu-latest + - target: x86_64-apple-darwin + os: macos-latest + - target: universal-apple-darwin + os: macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: taiki-e/upload-rust-binary-action@v1 + with: + bin: aquadoggo + tar: unix + archive: $bin-$tag-$target + target: ${{ matrix.target }} + token: ${{ secrets.GITHUB_TOKEN }} From ba76db8aba88150c5b71a19711489388ddda8525 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 18:37:16 +0200 Subject: [PATCH 05/18] Add armv7-unknown-linux-gnueabihf target for cross compilation CI --- .github/workflows/release.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7fc27b3c2..0e4afe9de 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,10 +24,12 @@ jobs: include: - target: aarch64-unknown-linux-gnu os: ubuntu-latest - - target: aarch64-apple-darwin - os: macos-latest - target: x86_64-unknown-linux-gnu os: ubuntu-latest + - target: armv7-unknown-linux-gnueabihf + os: ubuntu-latest + - target: aarch64-apple-darwin + os: macos-latest - target: x86_64-apple-darwin os: macos-latest - target: universal-apple-darwin From c27cac59c5eb3fff5567e3ce64f85c84e13bd433 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 18:41:25 +0200 Subject: [PATCH 06/18] Add v0.5.0 to CHANGELOG.md --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 347220506..bb9e61724 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.5.0] + ### Added - Dial peers discovered via mDNS [#331](https://github.com/p2panda/aquadoggo/pull/331) @@ -222,7 +224,8 @@ Released on 2021-10-25: :package: [`crate`](https://crates.io/crates/aquadoggo/0 - Use p2panda-rs 0.2.1 with fixed linter setting [#41](https://github.com/p2panda/aquadoggo/41) - Use `tide` for HTTP server and `jsonrpc-v2` for JSON RPC [#29](https://github.com/p2panda/aquadoggo/29) -[unreleased]: https://github.com/p2panda/aquadoggo/compare/v0.4.0...HEAD +[unreleased]: 
https://github.com/p2panda/aquadoggo/compare/v0.5.0...HEAD +[0.5.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.5.0 [0.4.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.4.0 [0.3.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.3.0 [0.2.0]: https://github.com/p2panda/aquadoggo/releases/tag/v0.2.0 From dc30e31d79d593b1514bbf975cceed5fffe80280 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 19:10:25 +0200 Subject: [PATCH 07/18] Add checksum when releasing binaries --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0e4afe9de..9e95a0e2e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -41,6 +41,7 @@ jobs: with: bin: aquadoggo tar: unix + checksum: sha512 archive: $bin-$tag-$target target: ${{ matrix.target }} token: ${{ secrets.GITHUB_TOKEN }} From 2cf0f2a3355ed128173fec1403c8eb2ba1354402 Mon Sep 17 00:00:00 2001 From: adz Date: Fri, 25 Aug 2023 19:30:05 +0200 Subject: [PATCH 08/18] Adjust release profile --- Cargo.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index fc24a6d0e..efbeff01c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,3 +3,8 @@ members = [ "aquadoggo", "aquadoggo_cli", ] + +[profile.release] +strip = true +lto = true +codegen-units = 1 From 96e85bc5730e566c995778a43a76fec12e8303b2 Mon Sep 17 00:00:00 2001 From: adz Date: Sat, 26 Aug 2023 13:33:18 +0200 Subject: [PATCH 09/18] Add compiling windows binaries as well, remove macos universal one --- .github/workflows/docker.yml | 2 +- .github/workflows/release.yml | 21 ++++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index fe492a48e..e8fb5e50a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -name: build and upload docker image +name: docker on: push: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 9e95a0e2e..b06673a5d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: compile and release binaries +name: release permissions: contents: write @@ -22,18 +22,20 @@ jobs: strategy: matrix: include: - - target: aarch64-unknown-linux-gnu + - target: aarch64-unknown-linux-gnu # ARM64 Linux (kernel 4.1, glibc 2.17+) os: ubuntu-latest - - target: x86_64-unknown-linux-gnu + - target: x86_64-unknown-linux-gnu # 64-bit Linux (kernel 3.2+, glibc 2.17+) os: ubuntu-latest - - target: armv7-unknown-linux-gnueabihf + - target: armv7-unknown-linux-gnueabihf # ARMv7-A Linux, hardfloat (kernel 3.2, glibc 2.17, RPi) os: ubuntu-latest - - target: aarch64-apple-darwin + - target: aarch64-apple-darwin # ARM64 macOS (11.0+, Big Sur+) os: macos-latest - - target: x86_64-apple-darwin - os: macos-latest - - target: universal-apple-darwin + - target: x86_64-apple-darwin # # 64-bit macOS (10.7+, Lion+) os: macos-latest + - target: x86_64-pc-windows-gnu # 64-bit MinGW (Windows 7+) + os: windows-latest + - target: x86_64-pc-windows-msvc # 64-bit MSVC (Windows 7+) + os: windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v3 @@ -41,7 +43,8 @@ jobs: with: bin: aquadoggo tar: unix - checksum: sha512 + zip: windows + checksum: sha256,sha512 archive: $bin-$tag-$target target: ${{ matrix.target }} token: ${{ secrets.GITHUB_TOKEN }} From bcf0fbbb891854ca94ac77eb695d6cbb3bf5d36c Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 30 Aug 
2023 10:39:08 +0200 Subject: [PATCH 10/18] Bring back all connection handler logs to warn level again --- aquadoggo/src/network/peers/handler.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/aquadoggo/src/network/peers/handler.rs b/aquadoggo/src/network/peers/handler.rs index 468652463..43cf8de2f 100644 --- a/aquadoggo/src/network/peers/handler.rs +++ b/aquadoggo/src/network/peers/handler.rs @@ -12,7 +12,7 @@ use libp2p::swarm::{ ConnectionHandler, ConnectionHandlerEvent, KeepAlive, Stream as NegotiatedStream, SubstreamProtocol, }; -use log::trace; +use log::warn; use thiserror::Error; use crate::network::peers::{Codec, CodecError, PeerMessage, Protocol}; @@ -294,7 +294,7 @@ impl ConnectionHandler for Handler { )); } Poll::Ready(Some(Err(err))) => { - trace!("Error decoding inbound message: {err}"); + warn!("Error decoding inbound message: {err}"); // Close this side of the stream. If the peer is still around, they // will re-establish their connection @@ -320,7 +320,7 @@ impl ConnectionHandler for Handler { // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. - trace!("Error during closing inbound connection: {err}") + warn!("Error during closing inbound connection: {err}") } self.inbound_substream = None; break; @@ -373,14 +373,14 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(err) => { - trace!("Error sending outbound message: {err}"); + warn!("Error sending outbound message: {err}"); self.outbound_substream = None; break; } } } Poll::Ready(Err(err)) => { - trace!("Error encoding outbound message: {err}"); + warn!("Error encoding outbound message: {err}"); self.outbound_substream = None; break; } @@ -399,7 +399,7 @@ impl ConnectionHandler for Handler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(err)) => { - trace!("Error flushing outbound message: {err}"); + warn!("Error flushing outbound message: {err}"); self.outbound_substream = None; break; } From 0cdfa63abdc86c3745a9a5abc499dc499faaea71 Mon Sep 17 00:00:00 2001 From: Andreas Dzialocha Date: Wed, 30 Aug 2023 13:42:38 +0200 Subject: [PATCH 11/18] Move deserialization into `PeerMessage` to distinct variants correctly (#538) * Move deserialization into PeerMessage to distinct variants correctly * Add entry to CHANGELOG.md --- CHANGELOG.md | 1 + aquadoggo/src/network/peers/message.rs | 251 +++++++++++++++++++++- aquadoggo/src/replication/announcement.rs | 112 +--------- aquadoggo/src/replication/message.rs | 160 +------------- 4 files changed, 258 insertions(+), 266 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb9e61724..5a9896c6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -93,6 +93,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Don't update or announce an update in schema provider if a schema with this id exists already [#472](https://github.com/p2panda/aquadoggo/pull/472) - Do nothing on document_view insertion conflicts [#474](https://github.com/p2panda/aquadoggo/pull/474) - Only over-write `http_port` when cli arg is passed [#489](https://github.com/p2panda/aquadoggo/pull/489) +- Move deserialization into PeerMessage to distinct variants correctly [#538](https://github.com/p2panda/aquadoggo/pull/538) ### Open Sauce diff --git a/aquadoggo/src/network/peers/message.rs b/aquadoggo/src/network/peers/message.rs index 6f1c98ea6..584ee2b0c 100644 --- 
a/aquadoggo/src/network/peers/message.rs +++ b/aquadoggo/src/network/peers/message.rs @@ -1,11 +1,19 @@ // SPDX-License-Identifier: AGPL-3.0-or-later +use p2panda_rs::entry::{EncodedEntry, LogId, SeqNum}; +use p2panda_rs::identity::PublicKey; +use p2panda_rs::operation::EncodedOperation; +use p2panda_rs::Validate; +use serde::de::Visitor; use serde::{Deserialize, Serialize}; -use crate::replication::{AnnouncementMessage, SyncMessage}; +use crate::replication::{ + Announcement, AnnouncementMessage, Message, Mode, SchemaIdSet, SessionId, SyncMessage, + ANNOUNCE_TYPE, ENTRY_TYPE, HAVE_TYPE, SYNC_DONE_TYPE, SYNC_REQUEST_TYPE, +}; /// p2panda protocol messages which can be sent over the wire. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] #[serde(untagged)] pub enum PeerMessage { /// Announcement of peers about the schema ids they are interest in. @@ -14,3 +22,242 @@ pub enum PeerMessage { /// Replication status and data exchange. SyncMessage(SyncMessage), } + +impl<'de> Deserialize<'de> for PeerMessage { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct MessageVisitor; + + impl<'de> Visitor<'de> for MessageVisitor { + type Value = PeerMessage; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("p2panda message") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let message_type: u64 = seq + .next_element()? + .ok_or_else(|| serde::de::Error::custom("invalid message type"))?; + + let message = match message_type { + ANNOUNCE_TYPE => { + let protocol_version: u64 = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing protocol version in announce message") + })?; + + let timestamp: u64 = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing timestamp in announce message") + })?; + + let supported_schema_ids: SchemaIdSet = + seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing target set in announce message") + })?; + supported_schema_ids.validate().map_err(|_| { + serde::de::Error::custom("invalid target set in announce message") + })?; + + PeerMessage::Announce(AnnouncementMessage( + protocol_version, + Announcement { + supported_schema_ids, + timestamp, + }, + )) + } + SYNC_REQUEST_TYPE => { + let session_id: SessionId = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing session id in replication message") + })?; + + let mode: Mode = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing mode in sync request message") + })?; + + let target_set: SchemaIdSet = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing target set in sync request message") + })?; + + target_set.validate().map_err(|_| { + serde::de::Error::custom("invalid target set in sync request message") + })?; + + if target_set.is_empty() { + return Err(serde::de::Error::custom( + "empty target set in sync request message", + )); + } + + PeerMessage::SyncMessage(SyncMessage::new( + session_id, + Message::SyncRequest(mode, target_set), + )) + } + ENTRY_TYPE => { + let session_id: SessionId = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing session id in replication message") + })?; + + let entry_bytes: EncodedEntry = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing entry bytes in entry message") + })?; + + let operation_bytes: Option = seq.next_element()?; + + 
PeerMessage::SyncMessage(SyncMessage::new( + session_id, + Message::Entry(entry_bytes, operation_bytes), + )) + } + SYNC_DONE_TYPE => { + let session_id: SessionId = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing session id in replication message") + })?; + + let live_mode: bool = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing live mode flag in sync done message") + })?; + + PeerMessage::SyncMessage(SyncMessage::new( + session_id, + Message::SyncDone(live_mode), + )) + } + HAVE_TYPE => { + let session_id: SessionId = seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing session id in replication message") + })?; + + let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = + seq.next_element()?.ok_or_else(|| { + serde::de::Error::custom("missing log heights in have message") + })?; + + PeerMessage::SyncMessage(SyncMessage::new( + session_id, + Message::Have(log_heights), + )) + } + _ => return Err(serde::de::Error::custom("unknown message type")), + }; + + if let Some(items_left) = seq.size_hint() { + if items_left > 0 { + return Err(serde::de::Error::custom( + "too many fields for p2panda message", + )); + } + }; + + Ok(message) + } + } + + deserializer.deserialize_seq(MessageVisitor) + } +} + +#[cfg(test)] +mod tests { + use ciborium::cbor; + use ciborium::value::{Error, Value}; + use p2panda_rs::entry::{LogId, SeqNum}; + use p2panda_rs::identity::PublicKey; + use p2panda_rs::serde::{deserialize_into, serialize_value}; + use p2panda_rs::test_utils::fixtures::public_key; + use rstest::rstest; + + use crate::replication::{ + Announcement, AnnouncementMessage, Message, Mode, SchemaIdSet, SyncMessage, + }; + use crate::test_utils::helpers::random_schema_id_set; + + use super::PeerMessage; + + #[rstest] + fn deserialize( + #[from(random_schema_id_set)] supported_schema_ids: SchemaIdSet, + #[from(random_schema_id_set)] target_set: SchemaIdSet, + public_key: PublicKey, + ) { + assert_eq!( + deserialize_into::(&serialize_value(cbor!([ + 0, + 1, + 12345678, + supported_schema_ids + ]))) + .unwrap(), + PeerMessage::Announce(AnnouncementMessage::new(Announcement { + timestamp: 12345678, + supported_schema_ids, + })) + ); + + assert_eq!( + deserialize_into::(&serialize_value(cbor!([1, 12, 0, target_set]))) + .unwrap(), + PeerMessage::SyncMessage(SyncMessage::new( + 12, + Message::SyncRequest(Mode::LogHeight, target_set.clone()) + )) + ); + + let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; + assert_eq!( + deserialize_into::(&serialize_value(cbor!([10, 12, log_heights]))) + .unwrap(), + PeerMessage::SyncMessage(SyncMessage::new(12, Message::Have(vec![]))) + ); + + assert_eq!( + deserialize_into::(&serialize_value(cbor!([ + 10, + 12, + vec![( + // Convert explicitly to bytes as `cbor!` macro doesn't understand somehow that + // `PublicKey` serializes to a byte array + serde_bytes::Bytes::new(&public_key.to_bytes()), + vec![(LogId::default(), SeqNum::default())] + )] + ]))) + .unwrap(), + PeerMessage::SyncMessage(SyncMessage::new( + 12, + Message::Have(vec![( + public_key, + vec![(LogId::default(), SeqNum::default())] + )]) + )) + ); + } + + #[rstest] + #[should_panic(expected = "invalid message type")] + #[case::invalid_message_type(cbor!([]))] + #[should_panic(expected = "missing protocol version in announce message")] + #[case::announce_missing_version(cbor!([0]))] + #[should_panic(expected = "missing timestamp in announce message")] + #[case::announce_missing_timestamp(cbor!([0, 122]))] + #[should_panic(expected = 
"too many fields for p2panda message")] + #[case::announce_too_many_fields(cbor!([0, 1, 0, ["schema_field_definition_v1"], "too much"]))] + #[should_panic(expected = "missing session id in replication message")] + #[case::sync_only_message_type(cbor!([1]))] + #[should_panic(expected = "empty target set in sync request")] + #[case::sync_only_message_type(cbor!([1, 0, 0, []]))] + #[should_panic(expected = "too many fields for p2panda message")] + #[case::sync_too_many_fields(cbor!([1, 0, 0, ["schema_field_definition_v1"], "too much"]))] + fn deserialize_invalid_messages(#[case] cbor: Result) { + // Check the cbor is valid + assert!(cbor.is_ok()); + + // We unwrap here to cause a panic and then test for expected error stings + deserialize_into::(&serialize_value(cbor)).unwrap(); + } +} diff --git a/aquadoggo/src/replication/announcement.rs b/aquadoggo/src/replication/announcement.rs index a775ab1bb..5720ae95f 100644 --- a/aquadoggo/src/replication/announcement.rs +++ b/aquadoggo/src/replication/announcement.rs @@ -2,14 +2,12 @@ use std::time::{SystemTime, UNIX_EPOCH}; -use p2panda_rs::Validate; -use serde::de::Visitor; use serde::ser::SerializeSeq; -use serde::{Deserialize, Serialize}; +use serde::Serialize; -use crate::replication::{MessageType, SchemaIdSet, ANNOUNCE_TYPE, REPLICATION_PROTOCOL_VERSION}; +use crate::replication::{SchemaIdSet, ANNOUNCE_TYPE, REPLICATION_PROTOCOL_VERSION}; -/// u64 timestamp from UNIX epoch until now. +/// U64 timestamp from UNIX epoch until now. pub fn now() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) @@ -40,7 +38,7 @@ pub type ProtocolVersion = u64; /// Message which can be used to send announcements over the wire. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct AnnouncementMessage(ProtocolVersion, Announcement); +pub struct AnnouncementMessage(pub ProtocolVersion, pub Announcement); impl AnnouncementMessage { pub fn new(announcement: Announcement) -> Self { @@ -70,76 +68,10 @@ impl Serialize for AnnouncementMessage { } } -impl<'de> Deserialize<'de> for AnnouncementMessage { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct MessageVisitor; - - impl<'de> Visitor<'de> for MessageVisitor { - type Value = AnnouncementMessage; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("p2panda announce message") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'de>, - { - let message_type: MessageType = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing message type in announce message") - })?; - - if message_type != ANNOUNCE_TYPE { - return Err(serde::de::Error::custom( - "invalid message type for announce message", - )); - } - - let protocol_version: ProtocolVersion = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing protocol version in announce message") - })?; - - let timestamp: u64 = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing timestamp in announce message") - })?; - - let supported_schema_ids: SchemaIdSet = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing target set in announce message") - })?; - supported_schema_ids.validate().map_err(|_| { - serde::de::Error::custom("invalid target set in announce message") - })?; - - if let Some(items_left) = seq.size_hint() { - if items_left > 0 { - return Err(serde::de::Error::custom( - "too many fields for announce message", - )); - } - }; - - Ok(AnnouncementMessage( - protocol_version, - 
Announcement { - supported_schema_ids, - timestamp, - }, - )) - } - } - - deserializer.deserialize_seq(MessageVisitor) - } -} - #[cfg(test)] mod tests { use ciborium::cbor; - use ciborium::value::{Error, Value}; - use p2panda_rs::serde::{deserialize_into, serialize_from, serialize_value}; + use p2panda_rs::serde::{serialize_from, serialize_value}; use rstest::rstest; use crate::replication::SchemaIdSet; @@ -155,38 +87,4 @@ mod tests { serialize_value(cbor!([0, 1, announcement.timestamp, supported_schema_ids])) ); } - - #[rstest] - fn deserialize(#[from(random_schema_id_set)] supported_schema_ids: SchemaIdSet) { - assert_eq!( - deserialize_into::(&serialize_value(cbor!([ - 0, - 1, - 12345678, - supported_schema_ids - ]))) - .unwrap(), - AnnouncementMessage::new(Announcement { - timestamp: 12345678, - supported_schema_ids, - }) - ); - } - - #[rstest] - #[should_panic(expected = "missing message type in announce message")] - #[case::missing_version(cbor!([]))] - #[should_panic(expected = "missing protocol version in announce message")] - #[case::missing_version(cbor!([0]))] - #[should_panic(expected = "missing timestamp in announce message")] - #[case::missing_timestamp(cbor!([0, 122]))] - #[should_panic(expected = "too many fields for announce message")] - #[case::too_many_fields(cbor!([0, 1, 0, ["schema_field_definition_v1"], "too much"]))] - fn deserialize_invalid_messages(#[case] cbor: Result) { - // Check the cbor is valid - assert!(cbor.is_ok()); - - // We unwrap here to cause a panic and then test for expected error stings - deserialize_into::(&serialize_value(cbor)).unwrap(); - } } diff --git a/aquadoggo/src/replication/message.rs b/aquadoggo/src/replication/message.rs index 6d5e92ba8..9a26c45a1 100644 --- a/aquadoggo/src/replication/message.rs +++ b/aquadoggo/src/replication/message.rs @@ -1,15 +1,12 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::fmt; - use p2panda_rs::entry::EncodedEntry; use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; use p2panda_rs::operation::EncodedOperation; -use p2panda_rs::{Human, Validate}; -use serde::de::Visitor; +use p2panda_rs::Human; use serde::ser::SerializeSeq; -use serde::{Deserialize, Serialize}; +use serde::Serialize; use crate::replication::{ MessageType, Mode, SchemaIdSet, SessionId, ENTRY_TYPE, HAVE_TYPE, SYNC_DONE_TYPE, @@ -120,103 +117,12 @@ impl Serialize for SyncMessage { } } -impl<'de> Deserialize<'de> for SyncMessage { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - struct SyncMessageVisitor; - - impl<'de> Visitor<'de> for SyncMessageVisitor { - type Value = SyncMessage; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("p2panda replication message") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: serde::de::SeqAccess<'de>, - { - let message_type: MessageType = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing message type in replication message") - })?; - - let session_id: SessionId = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing session id in replication message") - })?; - - let message = if message_type == SYNC_REQUEST_TYPE { - let mode: Mode = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing mode in sync request message") - })?; - - let target_set: SchemaIdSet = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing target set in sync request message") - })?; - - target_set.validate().map_err(|_| { - 
serde::de::Error::custom("invalid target set in sync request message") - })?; - - if target_set.is_empty() { - return Err(serde::de::Error::custom( - "empty target set in sync request message", - )); - } - - Ok(Message::SyncRequest(mode, target_set)) - } else if message_type == ENTRY_TYPE { - let entry_bytes: EncodedEntry = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing entry bytes in entry message") - })?; - - let operation_bytes: Option = seq.next_element()?; - - Ok(Message::Entry(entry_bytes, operation_bytes)) - } else if message_type == SYNC_DONE_TYPE { - let live_mode: bool = seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing live mode flag in sync done message") - })?; - - Ok(Message::SyncDone(live_mode)) - } else if message_type == HAVE_TYPE { - let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = - seq.next_element()?.ok_or_else(|| { - serde::de::Error::custom("missing log heights in have message") - })?; - - Ok(Message::Have(log_heights)) - } else { - Err(serde::de::Error::custom(format!( - "unknown message type {} in replication message", - message_type - ))) - }?; - - if let Some(items_left) = seq.size_hint() { - if items_left > 0 { - return Err(serde::de::Error::custom( - "too many fields for replication message", - )); - } - }; - - Ok(SyncMessage::new(session_id, message)) - } - } - - deserializer.deserialize_seq(SyncMessageVisitor) - } -} - #[cfg(test)] mod tests { use ciborium::cbor; - use ciborium::value::{Error, Value}; use p2panda_rs::entry::{LogId, SeqNum}; use p2panda_rs::identity::PublicKey; - use p2panda_rs::serde::{deserialize_into, serialize_from, serialize_value}; + use p2panda_rs::serde::{serialize_from, serialize_value}; use p2panda_rs::test_utils::fixtures::public_key; use rstest::rstest; @@ -255,64 +161,4 @@ mod tests { ])) ); } - - #[rstest] - fn deserialize(#[from(random_schema_id_set)] target_set: SchemaIdSet, public_key: PublicKey) { - assert_eq!( - deserialize_into::(&serialize_value(cbor!([1, 12, 0, target_set]))) - .unwrap(), - SyncMessage::new( - 12, - Message::SyncRequest(Mode::LogHeight, target_set.clone()) - ) - ); - - let log_heights: Vec<(PublicKey, Vec<(LogId, SeqNum)>)> = vec![]; - assert_eq!( - deserialize_into::(&serialize_value(cbor!([10, 12, log_heights]))) - .unwrap(), - SyncMessage::new(12, Message::Have(vec![])) - ); - - assert_eq!( - deserialize_into::(&serialize_value(cbor!([ - 10, - 12, - vec![( - // Convert explicitly to bytes as `cbor!` macro doesn't understand somehow that - // `PublicKey` serializes to a byte array - serde_bytes::Bytes::new(&public_key.to_bytes()), - vec![(LogId::default(), SeqNum::default())] - )] - ]))) - .unwrap(), - SyncMessage::new( - 12, - Message::Have(vec![( - public_key, - vec![(LogId::default(), SeqNum::default())] - )]) - ) - ); - } - - #[rstest] - #[should_panic(expected = "missing message type in replication message")] - #[case::no_fields(cbor!([]))] - #[should_panic(expected = "unknown message type 122 in replication message")] - #[case::unknown_message_type(cbor!([122, 0]))] - #[should_panic(expected = "missing session id in replication message")] - #[case::only_message_type(cbor!([1]))] - #[should_panic(expected = "empty target set in sync request")] - #[case::only_message_type(cbor!([1, 0, 0, []]))] - #[should_panic(expected = "too many fields for replication message")] - #[case::too_many_fields(cbor!([1, 0, 0, ["schema_field_definition_v1"], "too much"]))] - fn deserialize_invalid_messages(#[case] cbor: Result) { - // Check the cbor is valid - 
assert!(cbor.is_ok()); - - // Deserialize into sync message, we unwrap here to cause a panic and then test for - // expected error stings - deserialize_into::(&serialize_value(cbor)).unwrap(); - } } From 9c8c49e28d08b5dd9b76c9fa195e41b6fa042116 Mon Sep 17 00:00:00 2001 From: Andreas Dzialocha Date: Wed, 30 Aug 2023 15:50:18 +0200 Subject: [PATCH 12/18] Improve log level user interface (#539) * Configure env_logger manually * Set log verbosity through config API, scope it to aquadoggo by default * Add entry to CHANGELOG.md --- CHANGELOG.md | 1 + aquadoggo_cli/src/config.rs | 13 +++++++++++++ aquadoggo_cli/src/main.rs | 14 +++++++++++--- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5a9896c6a..74e31dc9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Rework networking service [#502](https://github.com/p2panda/aquadoggo/pull/502) - Deduplicate peer connections when initiating replication sessions [#525](https://github.com/p2panda/aquadoggo/pull/525) - Improve consistency and documentation of configuration API [#528](https://github.com/p2panda/aquadoggo/pull/528) +- Improve log level config and user interface [#539](https://github.com/p2panda/aquadoggo/pull/539) ### Fixed diff --git a/aquadoggo_cli/src/config.rs b/aquadoggo_cli/src/config.rs index f6e3ab806..b0f42f6a3 100644 --- a/aquadoggo_cli/src/config.rs +++ b/aquadoggo_cli/src/config.rs @@ -220,6 +220,17 @@ struct Cli { )] #[serde(skip_serializing_if = "Option::is_none")] relay_mode: Option, + + /// Set log verbosity. Use this for learning more about how your node behaves or for debugging. + /// + /// Possible log levels are: ERROR, WARN, INFO, DEBUG, TRACE. They are scoped to "aquadoggo" by + /// default. + /// + /// If you want to adjust the scope for deeper inspection use a filter value, for example + /// "=TRACE" for logging _everything_ or "aquadoggo=INFO,libp2p=DEBUG" etc. + #[arg(short = 'l', long, value_name = "LEVEL")] + #[serde(skip_serializing_if = "Option::is_none")] + log_level: Option, } /// Clap converts wildcard symbols from command line arguments (for example --supported-schema-ids @@ -250,6 +261,7 @@ where /// Configuration derived from environment variables and .toml file. 
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct Configuration { + pub log_level: String, pub allow_schema_ids: UncheckedAllowList, pub database_url: String, pub database_max_connections: u32, @@ -268,6 +280,7 @@ pub struct Configuration { impl Default for Configuration { fn default() -> Self { Self { + log_level: "off".into(), allow_schema_ids: UncheckedAllowList::Wildcard, database_url: "sqlite::memory:".into(), database_max_connections: 32, diff --git a/aquadoggo_cli/src/main.rs b/aquadoggo_cli/src/main.rs index af5b6b692..d02d22735 100644 --- a/aquadoggo_cli/src/main.rs +++ b/aquadoggo_cli/src/main.rs @@ -5,21 +5,29 @@ mod key_pair; mod utils; use std::convert::TryInto; +use std::str::FromStr; use anyhow::Context; use aquadoggo::{AllowList, Configuration, Node}; -use log::warn; +use env_logger::WriteStyle; +use log::{warn, LevelFilter}; use crate::config::{load_config, print_config}; use crate::key_pair::{generate_ephemeral_key_pair, generate_or_load_key_pair}; #[tokio::main] async fn main() -> anyhow::Result<()> { - env_logger::init(); - // Load configuration from command line arguments, environment variables and .toml file let (config_file_path, config) = load_config().context("Could not load configuration")?; + // Set log verbosity based on config. By default scope it always to the "aquadoggo" module. + let mut builder = env_logger::Builder::new(); + let builder = match LevelFilter::from_str(&config.log_level) { + Ok(log_level) => builder.filter(Some("aquadoggo"), log_level), + Err(_) => builder.parse_filters(&config.log_level), + }; + builder.write_style(WriteStyle::Always).init(); + // Convert to `aquadoggo` configuration format and check for invalid inputs let node_config = config .clone() From a5fd234410eedc43799171849c23e2302778372c Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 30 Aug 2023 21:35:50 +0200 Subject: [PATCH 13/18] Update RELEASE.md --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 5c56794b8..4aeb10ee8 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -21,7 +21,7 @@ _This is an example for publising version `1.2.0`._ ## Publishing releases -9. Copy the changelog entry you authored into Github's [new release page](https://github.com/p2panda/aquadoggo/releases/new)'s description field. Title it with your version `v1.2.0`. +9. The GitHub Action will automatically create the release on GitHub, compile binary targets and upload them as assets. Check if the jobs succeeded. 10. Run `cargo publish`. 
## Publishing on DockerHub From 5861077bea23fc817b2c9872bad8c785b9205702 Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 30 Aug 2023 22:01:33 +0200 Subject: [PATCH 14/18] Do only set permissions mode on unix --- aquadoggo_cli/src/key_pair.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/aquadoggo_cli/src/key_pair.rs b/aquadoggo_cli/src/key_pair.rs index d18775d9f..9e3b1c3d5 100644 --- a/aquadoggo_cli/src/key_pair.rs +++ b/aquadoggo_cli/src/key_pair.rs @@ -2,7 +2,6 @@ use std::fs::{self, File}; use std::io::{Read, Write}; -use std::os::unix::fs::PermissionsExt; use std::path::PathBuf; use anyhow::Result; @@ -44,9 +43,12 @@ fn save_key_pair_to_file(key_pair: &KeyPair, path: PathBuf) -> Result<()> { file.sync_all()?; // Set permission for sensitive information - let mut permissions = file.metadata()?.permissions(); - permissions.set_mode(0o600); - fs::set_permissions(path, permissions)?; + if cfg!(unix) { + use std::os::unix::fs::PermissionsExt; + let mut permissions = file.metadata()?.permissions(); + permissions.set_mode(0o600); + fs::set_permissions(path, permissions)?; + } Ok(()) } From ca4edc34cb9aab1b39ddaa2e0d3f98ee8b1c1ccc Mon Sep 17 00:00:00 2001 From: adz Date: Wed, 30 Aug 2023 22:17:42 +0200 Subject: [PATCH 15/18] Set unix cfg correctly --- aquadoggo_cli/src/key_pair.rs | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/aquadoggo_cli/src/key_pair.rs b/aquadoggo_cli/src/key_pair.rs index 9e3b1c3d5..4635d2b83 100644 --- a/aquadoggo_cli/src/key_pair.rs +++ b/aquadoggo_cli/src/key_pair.rs @@ -2,6 +2,8 @@ use std::fs::{self, File}; use std::io::{Read, Write}; +#[cfg(target_os = "unix")] +use std::os::unix::fs::PermissionsExt; use std::path::PathBuf; use anyhow::Result; @@ -33,6 +35,7 @@ pub fn generate_ephemeral_key_pair() -> KeyPair { /// /// This method automatically creates the required directories on that path and fixes the /// permissions of the file (0600, read and write permissions only for the owner). 
+#[cfg(target_os = "unix")] fn save_key_pair_to_file(key_pair: &KeyPair, path: PathBuf) -> Result<()> { let private_key_hex = hex::encode(key_pair.private_key().as_bytes()); @@ -43,12 +46,22 @@ fn save_key_pair_to_file(key_pair: &KeyPair, path: PathBuf) -> Result<()> { file.sync_all()?; // Set permission for sensitive information - if cfg!(unix) { - use std::os::unix::fs::PermissionsExt; - let mut permissions = file.metadata()?.permissions(); - permissions.set_mode(0o600); - fs::set_permissions(path, permissions)?; - } + let mut permissions = file.metadata()?.permissions(); + permissions.set_mode(0o600); + fs::set_permissions(path, permissions)?; + + Ok(()) +} + +#[cfg(not(target_os = "unix"))] +fn save_key_pair_to_file(key_pair: &KeyPair, path: PathBuf) -> Result<()> { + let private_key_hex = hex::encode(key_pair.private_key().as_bytes()); + + // Make sure that directories exist and write file into it + fs::create_dir_all(path.parent().unwrap())?; + let mut file = File::create(&path)?; + file.write_all(private_key_hex.as_bytes())?; + file.sync_all()?; Ok(()) } From c9882d0c6aeed1832e1fc22e21d0774db3c4a7ae Mon Sep 17 00:00:00 2001 From: Andreas Dzialocha Date: Thu, 31 Aug 2023 17:55:59 +0200 Subject: [PATCH 16/18] Prepare README for v0.5.0 release (#532) * Add more points to features list * Same feature list on other file * Write a little bit about configuration * Update usage * Fix link * Add examples * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * Wrap lines * Correct XDG directory * Add resources section * Small edits * Add GraphQL query examples * Move query comment into code snippet * Move comments back out of query snippet * Move query examples into own section * Update "what can i build" section * Update README.md * Review change requests * Fix query to use unix timestamps * Minor additions to aquadoggo/README.md * Include README.md in lib doc string * Remove empty line * Minor change * Don't run code snippet in README during tests --------- Co-authored-by: Sam Andreae --- README.md | 288 ++++++++++++++++++++++++++++--- aquadoggo/README.md | 43 ++--- aquadoggo/src/lib.rs | 34 +--- aquadoggo_cli/README.md | 336 +++++++++++++++++++++++++++++++++++- aquadoggo_cli/src/config.rs | 4 +- 5 files changed, 617 insertions(+), 88 deletions(-) diff --git a/README.md b/README.md index 3164a7c65..d65e45fcb 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

aquadoggo

-  Embeddable p2p network node
+  p2panda network node

@@ -43,17 +43,11 @@
-Configurable node server implementation for the [`p2panda`] network running as a
-[`command line application`] or embedded via the [`library`] inside your Rust program.
+`aquadoggo` is a reference node implementation for [p2panda](https://p2panda.org). It is intended as a tool for making the design and build of local-first, collaborative p2p applications as simple as possible, and hopefully even a little fun!
 
-> The core p2panda [`specification`] is fully functional but still under review so
-> please be prepared for breaking API changes until we reach v1.0. Currently no
-> p2panda implementation has received a security audit.
+`aquadoggo` can run both on your own device for local-first applications, or on a public server when acting as shared community infrastructure. Nodes like `aquadoggo` perform a number of tasks, ranging from core p2panda data replication and validation, to aiding the discovery and establishment of connections between edge peers, to exposing a developer-friendly API used for building applications.
 
-[`command line application`]: /aquadoggo_cli
-[`library`]: /aquadoggo
-[`p2panda`]: https://p2panda.org/
-[`specification`]: https://p2panda.org/specification
+> 📖 Read more about nodes in our [learn](https://p2panda.org/learn/networks) section
 
 ## Features
 
@@ -61,13 +55,146 @@
 - Verifies the consistency, format and signature of operations and rejects invalid ones.
 - Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL).
 - Materializes views on top of the known data.
-- Answers filterable and paginated data queries via GraphQL.
+- Answers filtered, sorted and paginated data queries via GraphQL.
 - Discovers other nodes in local network and internet.
-- Replicates data with other nodes.
+- Establishes peer-to-peer connections via UDP holepunching or via relays.
+- Replicates data efficiently with other nodes.
 
-## Example
+## Who is this for?
 
-Embed the node server in your Rust application or web container like [`Tauri`]:
+`aquadoggo` might be interesting for anyone who wants to participate in a p2p network. This could be as a node maintainer, an application developer or simply someone wanting to learn more about p2p networking in a hands-on fashion.
+
+If you are familiar with (or are keen to learn) how to use command line interfaces, you can deploy a node on your own machine and then experiment with creating data schemas, publishing and replicating data, and querying it again using the GraphQL playground. Check out the [resources](#Resources) section for ideas on next steps when you're ready.
+
+## What can I build with this?
+
+Many applications which rely on being able to store and retrieve data from a persistent store could likely be built using `aquadoggo` as their data layer. `aquadoggo` can be considered a p2p "backend", which takes some of the complexity out of p2p development, leaving you to focus on building applications using your preferred tools.
+
+> If you want to build a client application which communicates with an `aquadoggo`, you will need to have some experience with web development or the Rust programming language. For writing an application using Rust you can import `aquadoggo` directly in your code. If building a TypeScript web frontend which will interface with a local or remote node, you can import the small TypeScript client library [`shirokuma`](https://github.com/p2panda/shirokuma) to your project.
We have plans for making it easier to interact with `aquadoggo` using other languages in the future.
+
+Some example applications which could be built on top of `aquadoggo` are:
+
+- 🥄 **Community centre resource management:** Members of the centre want to manage some shared resources (table tennis, tools, cooking equipment); they each run an app ([Tauri](https://tauri.app/) desktop app with a bundled `aquadoggo` inside) on their own devices, where they can add resources, view availability and make loan requests. Discovery and syncing of data occurs automatically when members' devices are on the same local network.
+
+ See config +
+ + ```toml + # Schemas needed for our resource management application + allow_schema_ids = [ + "resource_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d", + "resource_booking_request_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0", + "resource_booking_accepted_00209a75d6f1440c188fa52555c8cdd60b3988e468e1db2e469b7d4425a225eba8ec", + ] + + # Enable mDNS discovery to automatically find other nodes on the local network and share data with them + mdns = true + ``` +
+- 🐦 **Local ecology monitoring:** Village residents want to collect data on bird species which are sighted in their area over the year. They want anyone with the app to be able to upload a sighting. All the residents run a native Android app on their smartphones, and they make use of a number of relay nodes which enable discovery and p2p or relayed connection establishment.
+
+ See config +
+ + _app node config_ + ```toml + # Schemas needed for our ecology monitoring application + allow_schema_ids = [ + "bird_species_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d", + "bird_sighting_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0", + ] + + # Addresses of the relay nodes helping us to connect the residents over the internet + relay_addresses = [ + "203.0.113.1:2022", + "198.51.100.21:2022", + ] + ``` + + _relay node config_ + ```toml + # A relay doesn't need to support any schemas + allow_schema_ids = [] + + # Enable relay mode + relay_mode = true + ``` +
+- 🗞️ **Coop notice boards:** Residents of a group of housing coops want to start a collaborative notice board. Each coop deploys a node on their local network and residents access a web-app to post and view ads or news. They're already using a shared VPN so nodes can connect directly, but only some coops are allowed to join the noticeboard network.
+
+ See config +
+ + ```toml + # Schemas needed for our coop notice board application + allow_schema_ids = [ + "notice_board_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d", + "notice_board_post_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0", + ] + + # Addresses of already known nodes we can connect directly to + direct_node_addresses = [ + "192.0.2.78:2022", + "198.51.100.22:2022", + "192.0.2.211:2022", + "203.0.114.123:2022", + ] + + # Peer ids of allowed peers, these will be the expected identities for the nodes we are connecting + # directly to + allowed_peer_ids = [ + "12D3KooWP1ahRHeNp6s1M9qDJD2oyqRsYFeKLYjcjmFxrq6KM8xd", + "12D3KooWPC9zdWXQ3aCEcxvuct9KUWU5tPsUT6KFo29Wf8jWRW24", + "12D3KooWDNNSdY8vxYKYZBGdfDTg1ZafxEVuEmh49jtF8rUeMkq2", + "12D3KooWMKiBvAxynLn7KmqbWdEzA8yq3of6yoLZF1cpmb4Z9fHf", + ] + ``` +
+ +We're excited to hear about your ideas! Join our [official chat](https://wald.liebechaos.org/) and reach out. + +## Installation + +### Command line application + +Check out our [Releases](/releases) section where we publish binaries for Linux, RaspberryPi, MacOS and Windows or read [how you can compile](/aquadoggo_cli/README.md#Installation) `aquadoggo` yourself. + +### Rust Crate + +For using `aquadoggo` in your Rust project, you can add it as a dependency with the following command: + +```bash +cargo add aquadoggo +``` + +## Usage + +### Run node + +You can also run the node simply as a [command line application](/aquadoggo_cli). `aquadoggo` can be configured in countless ways for your needs, read our [configuration](/aquadoggo_cli/README.md#Usage) section for more examples, usecases and an overview of configuration options. + +```bash +# Start a local node on your machine, go to http://localhost:2020/graphql for using the GraphQL playground +aquadoggo + +# Check out all configuration options +aquadoggo --help + +# Enable logging +aquadoggo --log-level info +``` + +### Docker + +For server deployments you might prefer using [Docker](https://hub.docker.com/r/p2panda/aquadoggo) to run `aquadoggo`. + +```bash +docker run -p 2020:2020 -p 2022:2022 -e LOG_LEVEL=info p2panda/aquadoggo +``` + +### Embed node + +Run the node directly next to the frontend you're building for full peer-to-peer applications by using the [`aquadoggo`](/aquadoggo) Rust crate. Check out our [Tauri](https://github.com/p2panda/tauri-example) example for writing a desktop app. ```rust use aquadoggo::{Configuration, Node}; @@ -78,29 +205,134 @@ let key_pair = KeyPair::new(); let node = Node::start(key_pair, config).await; ``` -You can also run the node server simply as a [command line application][`command line application`]: +### FFI bindings -```bash -# Compile and run local node at http://localhost:2020/graphql -cargo run +If you are not working with Rust you can create FFI bindings from the `aquadoggo` crate into your preferred programming language. Dealing with FFI bindings can be a bit cumbersome and we do not have much prepared for you (yet), but check out our [Meli](https://github.com/p2panda/meli/) Android project as an example on how we dealt with FFI bindings for Dart / Flutter. -# Enable logging -RUST_LOG=aquadoggo=info cargo run +## Query API + +As an application developer the interface to `aquadoggo` you are likely to use the most is the GraphQL query API. For whichever schema your node supports a custom query api is generated, you use this to fetch data into your app. Results from a collection query can be paginated, filtered. + +Fetch one "mushroom" by it's id, returning values for only the selected fields: +```graphql +{ + mushroom: mushroom_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d( + id: "0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0" + ) { + fields { + description + edible + latin + title + } + } +} ``` -.. or run it inside a [Docker](https://hub.docker.com/r/p2panda/aquadoggo) container: +
+Example query response +
-```bash -docker run -p 2020:2020 -e RUST_LOG=aquadoggo=info p2panda/aquadoggo +```json +{ + "mushroom": { + "description": "Its scientific name rhacodes comes from the Greek word rhakos, which means a piece of cloth. It does often have a soft, ragged fabric-like appearance.", + "edible": true, + "latin": "Chlorophyllum rhacodes", + "title": "Shaggy parasol" + } +} ``` +
-[`Tauri`]: https://tauri.studio +A collection query for "events" which includes ordering and filtering as well as selecting some meta fields. Here only events between the specified dates and with a title containing the string 'funtastic' will be returned, they will be arranged in ascending chronological order: +```graphql +{ + events: all_events_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0( + first: 20 + orderBy: "happening_at" + orderDirection: ASC + filter: { + title: { contains: "funtastic" } + happening_at: { gte: 1677676480, lte: 1696162480 } + } + ) { + totalCount + documents { + meta { + owner + documentId + viewId + } + fields { + title + happening_at + } + } + } +} +``` -## Installation +
+Example query response +
-```sh -cargo add aquadoggo +```json +{ + "events": { + "totalCount": 2, + "documents": [ + { + "meta": { + "owner": "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96", + "documentId": "0020f3214a136fd6d0a649e14432409bb28a59a6caf723fa329129c404c92574cb41", + "viewId": "00206e365e3a6a9b66dfe96ea4b3b3b7c61b250330a46b0c99134121603db5feef11" + }, + "fields": { + "title": "Try funtasticize!!", + "happening_at": 1680264880 + } + }, + { + "meta": { + "owner": "2f8e50c2ede6d936ecc3144187ff1c273808185cfbc5ff3d3748d1ff7353fc96", + "documentId": "002048a55d9265a16ba44b5f3be3e457238e02d3219ecca777d7b4edf28ba2f6d011", + "viewId": "002048a55d9265a16ba44b5f3be3e457238e02d3219ecca777d7b4edf28ba2f6d011" + }, + "fields": { + "title": "Is funtastic even a real word?", + "happening_at": 1693484080 + } + } + ] + } +} ``` +
+From these examples you might already see how useful the query API will be when retrieving and displaying data in your application.
+
+## Resources
+
+- 🐬 Deploy your own `aquadoggo` following the [tutorial](https://p2panda.org/tutorials/aquadoggo)
+- 🛠️ Create your own schemas using [`fishy`](https://github.com/p2panda/fishy)
+- 🛼 Open the GraphQL playground in your browser, served under `http://localhost:2020/graphql`
+- 📖 Try the [mushroom app tutorial](https://p2panda.org/tutorials/mushroom-app)
+- 🔬 Manually publish data to a node using [`send-to-node`](https://github.com/p2panda/send-to-node)
+- 🐼 [Learn more](https://p2panda.org/learn/) about how p2panda works
+
+## What shouldn't I do with `aquadoggo`?
+
+`aquadoggo` is built using the [p2panda](https://p2panda.org) protocol, which is still in development; some planned features are missing, the main ones being:
+
+- **Capabilities:** Currently all data can be edited by any author who has access to the network. In many cases, permissions can be handled where needed on the client side (planned mid-2024).
+- **Privacy:** While node communication is encrypted with TLS, the data stored on nodes itself is not. Integration of [MLS](https://p2panda.org/specification/encryption/) is underway but not complete yet.
+- **Deletion:** Network-wide purging of data is dependent on having a capabilities system already in place, so these two features will arrive together.
+- **Anonymity:** Networking exposes sensitive data about the devices you use; we're waiting for [Arti](https://tpo.pages.torproject.net/core/arti/) to support Onion Services to make this a configurable option.
+
+As well as these yet-to-be-implemented features, there are also general networking concerns (exposing your IP address, sharing data with untrusted peers) that you should take into account when participating in any network, and particularly in peer-to-peer networks.
+
+So although `aquadoggo` is already very useful in many cases, there are others where it won't be a good fit yet or we would actively warn against use. For now, any uses which would be handling especially sensitive data are not recommended, and any users who have special network security requirements need to take extra precautions. Reach out on our [official chat](https://wald.liebechaos.org/) if you have any questions.
 
 ## License
 
@@ -114,4 +346,4 @@ GNU Affero General Public License v3.0 [`AGPL-3.0-or-later`](LICENSE)
 
 *This project has received funding from the European Union’s Horizon 2020
 research and innovation programme within the framework of the NGI-POINTER
-Project funded under grant agreement No 871528*
+Project funded under grant agreement No 871528 and NGI-ASSURE No 957073*
diff --git a/aquadoggo/README.md b/aquadoggo/README.md
index 17f36c43c..591c71295 100644
--- a/aquadoggo/README.md
+++ b/aquadoggo/README.md
@@ -43,15 +43,12 @@
-Configurable node server implementation for the [`p2panda`] network which can
-be embedded inside your Rust program.
+`aquadoggo` is a reference node implementation for [p2panda](https://p2panda.org). It is intended as a tool for making the design and build of local-first, collaborative p2p applications as simple as possible, and hopefully even a little fun!
 
-> The core p2panda [`specification`] is fully functional but still under review so
-> please be prepared for breaking API changes until we reach v1.0. Currently no
-> p2panda implementation has recieved a security audit.
+`aquadoggo` can run either on your own device for local-first applications, or on a public server when acting as shared community infrastructure. Nodes like `aquadoggo` perform a number of tasks, from core p2panda data replication and validation, to aiding the discovery and establishment of connections between edge peers, to exposing a developer-friendly API used for building applications.
 
-[`p2panda`]: https://p2panda.org
-[`specification`]: https://p2panda.org/specification
+> 📖 Read more about nodes in our [learn](https://p2panda.org/learn/networks) section
+> 🐬 Visit the main repo [README](https://github.com/p2panda/aquadoggo) for more general info
 
 ## Features
 
@@ -59,15 +56,24 @@ be embedded inside your Rust program.
 - Verifies the consistency, format and signature of operations and rejects invalid ones.
 - Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL).
 - Materializes views on top of the known data.
-- Answers filterable and paginated data queries via GraphQL.
+- Answers filtered, sorted and paginated data queries via GraphQL.
 - Discovers other nodes in local network and internet.
-- Replicates data with other nodes.
+- Establishes peer-to-peer connections via UDP holepunching or via relays.
+- Replicates data efficiently with other nodes.
+
+## Installation
+
+To use `aquadoggo` in your Rust project, add it as a dependency with the following command:
+
+```bash
+cargo add aquadoggo
+```
 
 ## Example
 
-Embed the node server in your Rust application or web container like [`Tauri`]:
+Run the node directly next to the frontend you're building for full peer-to-peer applications. Check out our [Tauri](https://github.com/p2panda/tauri-example) example for writing a desktop app.
 
-```rust
+```rust,ignore
 use aquadoggo::{Configuration, Node};
 use p2panda_rs::identity::KeyPair;
 
@@ -76,17 +82,13 @@ let key_pair = KeyPair::new();
 let node = Node::start(key_pair, config).await;
 ```
 
-[`Tauri`]: https://tauri.studio
-
-## Installation
+### FFI bindings
 
-With [`cargo-edit`](https://github.com/killercup/cargo-edit) installed run:
+If you are not working with Rust, you can create FFI bindings from the `aquadoggo` crate into your preferred programming language. Dealing with FFI bindings can be a bit cumbersome and we do not have much prepared for you (yet), but check out our [Meli](https://github.com/p2panda/meli/) Android project as an example of how we dealt with FFI bindings for Dart / Flutter.
 
-```bash
-$ cargo add aquadoggo
-```
+### Command line application
 
-[`cargo-edit`]: https://github.com/killercup/cargo-edit
+Check out our [Releases](/releases) section where we publish binaries for Linux, RaspberryPi, MacOS and Windows.
 
 ## Development
 
@@ -107,8 +109,9 @@ GNU Affero General Public License v3.0 [`AGPL-3.0-or-later`](LICENSE)
 
 ## Supported by
+
*This project has received funding from the European Union’s Horizon 2020 research and innovation programme within the framework of the NGI-POINTER -Project funded under grant agreement No 871528* +Project funded under grant agreement No 871528 and NGI-ASSURE No 957073* diff --git a/aquadoggo/src/lib.rs b/aquadoggo/src/lib.rs index 2c4eb447f..c7dc93919 100644 --- a/aquadoggo/src/lib.rs +++ b/aquadoggo/src/lib.rs @@ -1,38 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -//! # aquadoggo -//! -//! Configurable node server implementation for the [`p2panda`] network. -//! -//! [`p2panda`]: https://p2panda.org -//! -//! ## Features -//! -//! - Awaits signed operations from clients via GraphQL. -//! - Verifies the consistency, format and signature of operations and rejects invalid ones. -//! - Stores operations of the network in an SQL database of your choice (SQLite, PostgreSQL). -//! - Materializes views on top of the known data. -//! - Answers filterable and paginated data queries via GraphQL. -//! - Discovers other nodes in local network and internet. -//! - Replicates data with other nodes. -//! -//! ## Example -//! -//! Embed the node server in your Rust application or web container like [`Tauri`]: -//! -//! ```rust,no_run -//! # #[tokio::main] -//! # async fn main() { -//! use aquadoggo::{Configuration, Node}; -//! use p2panda_rs::identity::KeyPair; -//! -//! let config = Configuration::default(); -//! let key_pair = KeyPair::new(); -//! let node = Node::start(key_pair, config).await; -//! # } -//! ``` -//! -//! [`Tauri`]: https://tauri.studio +#![doc = include_str!("../README.md")] #![warn( missing_debug_implementations, missing_docs, diff --git a/aquadoggo_cli/README.md b/aquadoggo_cli/README.md index 6b29ab298..8317da499 100644 --- a/aquadoggo_cli/README.md +++ b/aquadoggo_cli/README.md @@ -1,13 +1,333 @@ -# aquadoggo CLI +

+# aquadoggo CLI
+
-Node server with GraphQL API for the p2panda network.
+
+> p2panda network node
+
+
+Configurable node for the [`p2panda`] network, which runs as a command line
+application on any computer, single-board device or server.
+
+## Installation
+
+### Pre-compiled binaries
+
+Check out our [Releases](/releases) section where we publish binaries for
+Linux, RaspberryPi, MacOS and Windows.
+
+### Compile it yourself
+
+For the following steps you need a
+[Rust](https://www.rust-lang.org/learn/get-started) development environment on
+your machine.
+
+```bash
+# Download source code
+git clone https://github.com/p2panda/aquadoggo.git
+cd aquadoggo
+
+# Compile binary
+cargo build --release
+
+# Copy binary into your path (example)
+cp ./target/release/aquadoggo ~/.local/bin
+```
+
+## Examples
+
+```bash
+# For experimental setups it is enough to just start the node!
+aquadoggo
+
+# Enable logging
+aquadoggo --log-level info
+
+# By default the config.toml is loaded from the same folder or from the XDG
+# config directory, but you can also specify a custom path
+aquadoggo -c ../config.toml
+
+# Turn your aquadoggo into a relay
+aquadoggo --relay-mode
+
+# Check out the config.toml file for more options or consult the help menu. You
+# might need it for more sophisticated setups
+aquadoggo --help
+```
+
+## Usage
+
+`aquadoggo` is a powerful node implementation which can run in very different
+setups during development and in production. It can be configured through a
+[`config.toml`] file, environment variables and command line arguments,
+depending on your needs.
+
+### Common setups
+
+#### Support only certain schemas
+
+> "I want to run a node which only replicates and serves data from a limited
+> set of schemas. In this case it's schemas required by a mushroom sighting
+> app."
+
+```toml
+allow_schema_ids = [
+  "mushroom_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d",
+  "mushroom_finding_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0"
+]
+```
+
+#### Support all schemas but restrict allowed peers
+
+> "Me and my friends are running our own `p2panda` network supported by a
+> couple of relay nodes, we trust each other and want to support all published
+> data, but not allow anyone else to join the network."
+
+```toml
+# I can do this by configuring `allow_schema_ids` to be a wildcard, meaning any
+# schema is automatically supported!
+allow_schema_ids = "*"
+
+# Add relay addresses to allow establishing connections over the internet
+relay_addresses = [
+  "203.0.113.12:2022",
+  "203.0.113.34:2022",
+]
+
+# Enable discovery using mDNS (true by default)
+mdns = true
+
+# Populate an allow list which will contain the peer ids of our friends
+# (including the ones acting as relays)
+allow_peer_ids = [
+  "12D3KooWLxGKMgUtekXam9JsSjMa3b7M3rYEYUYUywdehHTRrLgU",
+  "12D3KooWP1ahRHeNp6s1M9qDJD2oyqRsYFeKLYjcjmFxrq6KM8xd",
+]
+```
+
+#### Act as a relay node
+
+> "I want to deploy a relay which assists in connecting edge peers but doesn't
+> persist any data itself."
+
+```toml
+# I can do this by configuring `allow_schema_ids` to be an empty list, meaning
+# this node does not support any schemas
+allow_schema_ids = []
+
+# Then set the relay flag so this node behaves as a relay for other nodes
+relay_mode = true
+```
+
+#### Directly connect to known peers
+
+> "I want my node to connect to a list of known and accessible peers."
+
+```toml
+# Allow all schemas
+allow_schema_ids = "*"
+
+# Address of nodes with static IP addresses we can connect directly to
+direct_node_addresses = [
+  "192.0.2.78:2022",
+  "198.51.100.22:2022",
+  "192.0.2.211:2022",
+  "203.0.114.123:2022",
+]
+```
+
+#### Persist node identity and database
+
+> "I want my node to persist its identity and database on the filesystem and
+> retrieve them whenever it runs again."
+
+```toml
+# Persist node private key at given location (using Linux XDG paths as an example)
+private_key = "$HOME/.local/share/aquadoggo/private-key.txt"
+
+# Persist SQLite database at given location
+database_url = "sqlite:$HOME/.local/share/aquadoggo/db.sqlite3"
+```
+
+### Configuration
+
+Check out the [`config.toml`] file for all configurations and documentation or
+run `--help` to see all possible command line arguments. All values can also be
+defined as environment variables, written in SCREAMING_SNAKE_CASE (for example
+`HTTP_PORT=3000`).
+
+```
+Usage: aquadoggo [OPTIONS]
+
+Options:
+  -c, --config
+          Path to an optional "config.toml" file for further configuration.
+
+          When not set the program will try to find a `config.toml` file in the
+          same folder the program is executed in and otherwise in the operating
+          system's XDG config directory
+          ("$HOME/.config/aquadoggo/config.toml" on Linux).
+
+  -s, --allow-schema-ids [...]
+          List of schema ids which a node will replicate, persist and expose on
+          the GraphQL API. Separate multiple values with a whitespace. Defaults
+          to allow _any_ schemas ("*").
+
+          When allowing a schema you automatically opt into announcing,
+          replicating and materializing documents connected to it, supporting
+          applications and networks which are dependent on this data.
+
+          It is recommended to set this list to all schema ids your own
+          application should support, including all important system schemas.
+
+          WARNING: When set to wildcard "*", your node will support _any_
+          schemas it will encounter on the network. This is useful for
+          experimentation and local development but _not_ recommended for
+          production settings.
+
+  -d, --database-url
+          URL / connection string to PostgreSQL or SQLite database. Defaults to
+          an in-memory SQLite database.
+
+          WARNING: By default your node will not persist anything after
+          shutdown. Set a database connection url for production settings to
+          not lose data.
+
+  -p, --http-port
+          HTTP port for client-node communication, serving the GraphQL API.
+          Defaults to 2020
+
+  -q, --quic-port
+          QUIC port for node-node communication and data replication. Defaults
+          to 2022
+
+  -k, --private-key
+          Path to persist your ed25519 private key file. Defaults to an
+          ephemeral key only for this current session.
+
+          The key is used to identify you towards other nodes during network
+          discovery and replication. This key is _not_ used to create and sign
+          data.
+
+          If a path is set, a new key will be generated and stored under this
+          path when the node starts for the first time.
+
+          When no path is set, your node will generate an ephemeral private key
+          on every start up and _not_ persist it.
+
+  -m, --mdns []
+          mDNS to discover other peers on the local network. Enabled by default
+
+          [possible values: true, false]
+
+  -n, --direct-node-addresses [...]
+          List of known node addresses we want to connect to directly.
+
+          Make sure that nodes mentioned in this list are directly reachable
+          (they need to be hosted with a static IP Address).
+          If you need to connect to nodes with changing, dynamic IP addresses
+          or even with nodes behind a firewall or NAT, do not use this field
+          but use at least one relay.
+
+  -a, --allow-peer-ids [...]
+          List of peers which are allowed to connect to your node.
+
+          If set then only nodes (identified by their peer id) contained in
+          this list will be able to connect to your node (via a relay or
+          directly). When not set any other node can connect to yours.
+
+          Peer IDs identify nodes by using their hashed public keys. They do
+          _not_ represent authored data from clients and are only used to
+          authenticate nodes towards each other during networking.
+
+          Use this list for example for setups where the identifier of the
+          nodes you want to form a network with is known but you still need to
+          use relays as their IP addresses change dynamically.
+
+  -b, --block-peer-ids [...]
+          List of peers which will be blocked from connecting to your node.
+
+          If set then any peers (identified by their peer id) contained in this
+          list will be blocked from connecting to your node (via a relay or
+          directly). When an empty list is provided then there are no
+          restrictions on which nodes can connect to yours.
+
+          Block lists and allow lists are exclusive, which means that you
+          should _either_ use a block list _or_ an allow list depending on your
+          setup.
+
+          Use this list for example if you want to allow _any_ node to connect
+          to yours _except_ for a known number of excluded nodes.
+
+  -r, --relay-addresses [...]
+          List of relay addresses.
+
+          A relay helps discover other nodes on the internet (also known as a
+          "rendezvous" or "bootstrap" server) and helps to establish direct p2p
+          connections when a node is behind a firewall or NAT (also known as
+          "holepunching").
+
+          WARNING: This will potentially expose your IP address on the network.
+          Only connect to trusted relays or make sure your IP address is hidden
+          via a VPN or proxy if you're concerned about leaking your IP.
+
+  -e, --relay-mode []
+          Enable if node should also function as a relay. Disabled by default.
+
+          Other nodes can use relays to aid discovery and to establish
+          connectivity.
+
+          Relays _need_ to be hosted in a way where they can be reached
+          directly, for example with a static IP address through a VPS.
+
+          [possible values: true, false]
+
+  -l, --log-level
+          Set log verbosity. Use this for learning more about how your node
+          behaves or for debugging.
+
+          Possible log levels are: ERROR, WARN, INFO, DEBUG, TRACE. They are
+          scoped to "aquadoggo" by default.
+
+          If you want to adjust the scope for deeper inspection use a filter
+          value, for example "=TRACE" for logging _everything_ or
+          "aquadoggo=INFO,libp2p=DEBUG" etc.
+
+  -h, --help
+          Print help (see a summary with '-h')
+
+  -V, --version
+          Print version
+```
 
 ## Development
 
 ```bash
-cargo run
+# Run node during development with logging enabled
+cargo run -- --log-level debug
+
+# Show logs of all modules
+cargo run -- --log-level "=debug"
+
+# Run tests
 cargo test
-cargo build
 ```
 
 ## License
 
@@ -17,6 +337,12 @@ GNU Affero General Public License v3.0 [`AGPL-3.0-or-later`](LICENSE)
 
 ## Supported by
+
-*This project has received funding from the European Union’s Horizon 2020 research and innovation programme within the framework of the NGI-POINTER Project funded under grant agreement No 871528* +*This project has received funding from the European Union’s Horizon 2020 +research and innovation programme within the framework of the NGI-POINTER +Project funded under grant agreement No 871528 and NGI-ASSURE No 957073* + +[`config.toml`]: config.toml +[`p2panda`]: https://p2panda.org diff --git a/aquadoggo_cli/src/config.rs b/aquadoggo_cli/src/config.rs index b0f42f6a3..f9b51a77d 100644 --- a/aquadoggo_cli/src/config.rs +++ b/aquadoggo_cli/src/config.rs @@ -67,8 +67,8 @@ pub fn load_config() -> Result<(ConfigFilePath, Configuration)> { /// been set. #[derive(Parser, Serialize, Debug)] #[command( - name = "aquadoggo Node", - about = "Node server for the p2panda network", + name = "aquadoggo", + about = "Node for the p2panda network", long_about = None, version )] From 1998d46e58990369bb0c7e76092d251ec1ba53e2 Mon Sep 17 00:00:00 2001 From: Sam Andreae Date: Thu, 31 Aug 2023 17:03:12 +0100 Subject: [PATCH 17/18] 0.5.0 --- Cargo.lock | 4 ++-- aquadoggo/Cargo.toml | 2 +- aquadoggo_cli/Cargo.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e24765740..4c412a346 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -156,7 +156,7 @@ checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854" [[package]] name = "aquadoggo" -version = "0.4.0" +version = "0.5.0" dependencies = [ "anyhow", "async-graphql", @@ -209,7 +209,7 @@ dependencies = [ [[package]] name = "aquadoggo_cli" -version = "0.2.0" +version = "0.3.0" dependencies = [ "anyhow", "aquadoggo", diff --git a/aquadoggo/Cargo.toml b/aquadoggo/Cargo.toml index c528951ec..7dd1a5652 100644 --- a/aquadoggo/Cargo.toml +++ b/aquadoggo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "aquadoggo" -version = "0.4.0" +version = "0.5.0" authors = [ "adz ", "cafca ", diff --git a/aquadoggo_cli/Cargo.toml b/aquadoggo_cli/Cargo.toml index a3b36ab74..6bc76a973 100644 --- a/aquadoggo_cli/Cargo.toml +++ b/aquadoggo_cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "aquadoggo_cli" -version = "0.2.0" +version = "0.3.0" authors = [ "adz ", "cafca ", @@ -36,7 +36,7 @@ tokio = { version = "1.28.2", features = ["full"] } toml = "0.7.6" [dependencies.aquadoggo] -version = "~0.4.0" +version = "~0.5.0" path = "../aquadoggo" [dev-dependencies] From 02dcffe25a7e5fcf69e3c35f99ed8a3300da5c15 Mon Sep 17 00:00:00 2001 From: Andreas Dzialocha Date: Fri, 1 Sep 2023 10:56:21 +0200 Subject: [PATCH 18/18] Minor README.md updates --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index d65e45fcb..4b354de35 100644 --- a/README.md +++ b/README.md @@ -211,9 +211,10 @@ If you are not working with Rust you can create FFI bindings from the `aquadoggo ## Query API -As an application developer the interface to `aquadoggo` you are likely to use the most is the GraphQL query API. For whichever schema your node supports a custom query api is generated, you use this to fetch data into your app. Results from a collection query can be paginated, filtered. +As an application developer the interface you are likely to use the most is the GraphQL query API. For whichever schema your node supports a custom query API is generated, you use this to fetch data into your app. Results from a collection query can be paginated, sorted and filtered. 
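+
+Under the hood these are plain GraphQL requests against the node's HTTP API, served under `http://localhost:2020/graphql` by default. As a rough, hypothetical sketch (assuming the `reqwest` crate with its `blocking` and `json` features plus `serde_json`; any HTTP or GraphQL client works just as well), fetching the total count of "events" documents could look like this:
+
+```rust
+use serde_json::{json, Value};
+
+fn main() -> Result<(), reqwest::Error> {
+    // Count all documents of the example "events" schema used below.
+    let query = r#"
+        {
+          events: all_events_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0 {
+            totalCount
+          }
+        }
+    "#;
+
+    // The GraphQL endpoint lives on the node's HTTP port (2020 by default).
+    let response: Value = reqwest::blocking::Client::new()
+        .post("http://localhost:2020/graphql")
+        .json(&json!({ "query": query }))
+        .send()?
+        .json()?;
+
+    println!("{response}");
+    Ok(())
+}
+```
+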
Fetch one "mushroom" by it's id, returning values for only the selected fields: + ```graphql { mushroom: mushroom_0020c3accb0b0c8822ecc0309190e23de5f7f6c82f660ce08023a1d74e055a3d7c4d( @@ -245,7 +246,8 @@ Fetch one "mushroom" by it's id, returning values for only the selected fields: ``` -A collection query for "events" which includes ordering and filtering as well as selecting some meta fields. Here only events between the specified dates and with a title containing the string 'funtastic' will be returned, they will be arranged in ascending chronological order: +Fetch all "events" with ordering and filtering as well as selecting some meta fields. Here only events between the specified dates and with a title containing the string 'funtastic' will be returned, they will be arranged in ascending chronological order: + ```graphql { events: all_events_0020aaabb3edecb2e8b491b0c0cb6d7d175e4db0e9da6003b93de354feb9c52891d0( @@ -310,8 +312,6 @@ A collection query for "events" which includes ordering and filtering as well as ``` -From these examples you might already see how useful the query api will be when retrieving and displaying data in your application. - ## Resources - 🐬 Deploy your own `aquadoggo` following the [tutorial](https://p2panda.org/tutorials/aquadoggo) @@ -328,7 +328,7 @@ From these examples you might already see how useful the query api will be when - **Capabilities:** Currently all data can be edited by any author who has access to the network. In many cases, permissions can be handled where needed on the client side (planned mid-2024). - **Privacy:** While node communication is encrypted with TLS the data stored on nodes itself is not. Integration of [MLS](https://p2panda.org/specification/encryption/) is underway but not complete yet. - **Deletion:** Network-wide purging of data is dependent on having a capabilities system already in place, so these two features will arrive together. -- **Anonymity:** Networking exposes sensitive data about your used devices, we're waiting for [Arti](https://tpo.pages.torproject.net/core/arti/) supporting Onion Services to make this a configurable option. +- **Anonymity:** Networking exposes sensitive data, we're waiting for [Arti](https://tpo.pages.torproject.net/core/arti/) supporting Onion Services to make this a configurable option. As well as these yet-to-be implemented features, there are also general networking concerns (exposing your IP address, sharing data with untrusted peers) that you should take into account when participating in any network, and particularily in peer-to-peer networks.