
Commit d38ac65

Upgrade litep2p to v0.12.0 and handle Kademlia publish success events (#9685)
litep2p v0.12.0 adds the ability to track whether publishing a DHT record or provider was successful. This PR brings that functionality to Substrate. In particular, it fixes authority-discovery unnecessarily republishing DHT records, caused by litep2p not emitting `KademliaEvent::PutRecordSuccess` before v0.12.0.

Co-authored-by: cmd[bot] <41898282+github-actions[bot]@users.noreply.github.com>
1 parent ed00ea3 commit d38ac65
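For context, a hedged sketch of how a consumer of these events might use the new success signal: once the litep2p backend also delivers `ValuePut` / `StartedProviding`, a publisher only needs to retry keys that actually failed instead of republishing on a timer. The `DhtEvent`-like enum below mirrors the variants in `substrate/client/network/src/event.rs` from this diff, but the `PublishTracker` type, its fields, and the retry policy are illustrative assumptions, not the real authority-discovery code.

```rust
use std::collections::HashSet;

// Simplified stand-ins for the real sc-network types (illustrative only).
type Key = Vec<u8>;

enum DhtEvent {
    /// The record has been successfully inserted into the DHT.
    ValuePut(Key),
    /// An error has occurred while putting a record into the DHT.
    ValuePutFailed(Key),
    /// Successfully started providing the given key.
    StartedProviding(Key),
}

/// Hypothetical publisher-side bookkeeping of unconfirmed records.
struct PublishTracker {
    awaiting_confirmation: HashSet<Key>,
}

impl PublishTracker {
    /// Returns a key to republish only when the DHT reported a failure;
    /// confirmed keys are simply dropped from the pending set.
    fn on_dht_event(&mut self, event: DhtEvent) -> Option<Key> {
        match event {
            DhtEvent::ValuePut(key) | DhtEvent::StartedProviding(key) => {
                self.awaiting_confirmation.remove(&key);
                None
            },
            DhtEvent::ValuePutFailed(key) => Some(key),
        }
    }
}
```

Before litep2p v0.12.0 the `ValuePut` and `StartedProviding` arms were never reached with that backend, so every record stayed pending and was republished unnecessarily.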

File tree

6 files changed: 208 additions & 32 deletions

Cargo.lock

Lines changed: 96 additions & 23 deletions
Some generated files are not rendered by default.

Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -908,7 +908,7 @@ linked-hash-map = { version = "0.5.4" }
 linked_hash_set = { version = "0.1.4" }
 linregress = { version = "0.5.1" }
 lite-json = { version = "0.2.0", default-features = false }
-litep2p = { version = "0.11.0", features = ["rsa", "websocket"] }
+litep2p = { version = "0.12.0", features = ["rsa", "websocket"] }
 log = { version = "0.4.22", default-features = false }
 macro_magic = { version = "0.5.1" }
 maplit = { version = "1.0.2" }

prdoc/pr_9685.prdoc

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+title: Upgrade litep2p to v0.12.0
+doc:
+- audience: Node Dev
+  description: litep2p v0.12.0 adds ability to track whether publishing a DHT record
+    or provider was successful. This PR brings this functionality to substrate. Particularly,
+    this fixes authority-discovery unnecessarily republishing DHT records due to litep2p
+    not emitting `KademliaEvent::PutRecordSuccess` before v0.12.0.
+crates:
+- name: sc-network
+  bump: major
+- name: sc-network-types
+  bump: major

substrate/client/network/src/event.rs

Lines changed: 0 additions & 2 deletions
@@ -48,14 +48,12 @@ pub enum DhtEvent {
 	ValueNotFound(Key),
 
 	/// The record has been successfully inserted into the DHT.
-	// TODO: this is not implemented with litep2p network backend.
 	ValuePut(Key),
 
 	/// An error has occurred while putting a record into the DHT.
 	ValuePutFailed(Key),
 
 	/// Successfully started providing the given key.
-	// TODO: this is not implemented with litep2p network backend.
 	StartedProviding(Key),
 
 	/// An error occured while registering as a content provider on the DHT.

substrate/client/network/src/litep2p/discovery.rs

Lines changed: 40 additions & 3 deletions
@@ -80,6 +80,16 @@ const MAX_EXTERNAL_ADDRESSES: u32 = 32;
 /// external.
 const MIN_ADDRESS_CONFIRMATIONS: usize = 3;
 
+/// Quorum threshold to interpret `PUT_VALUE` & `ADD_PROVIDER` as successful.
+///
+/// As opposed to libp2p, litep2p does not finish the query as soon as the required number of
+/// peers have reached. Instead, it tries to put the record to all target peers (typically 20) and
+/// uses the quorum setting only to determine the success of the query.
+///
+/// We set the threshold to 50% of the target peers to account for unreachable peers. The actual
+/// number of stored records may be higher.
+const QUORUM_THRESHOLD: NonZeroUsize = NonZeroUsize::new(10).expect("10 > 0; qed");
+
 /// Discovery events.
 #[derive(Debug)]
 pub enum DiscoveryEvent {
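To make the doc comment above concrete, here is a minimal sketch of the success check, assuming the typical 20 target peers; the `put_succeeded` helper and the peer counts are illustrative, not litep2p internals.

```rust
/// Mirrors the new threshold in `discovery.rs` (half of ~20 target peers),
/// shown here as a plain integer for simplicity.
const QUORUM_THRESHOLD: usize = 10;

/// Illustrative check: litep2p still sends the record to every target peer,
/// and only afterwards compares the number of peers that acknowledged the
/// store against the quorum.
fn put_succeeded(acknowledging_peers: usize) -> bool {
    acknowledging_peers >= QUORUM_THRESHOLD
}

fn main() {
    // 13 of 20 peers stored the record: the query is reported as successful.
    assert!(put_succeeded(13));
    // Only 7 peers stored it: the query is reported as failed, even though
    // those 7 replicas still exist in the DHT.
    assert!(!put_succeeded(7));
}
```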
@@ -174,6 +184,14 @@
 		providers: Vec<ContentProvider>,
 	},
 
+	/// Provider was successfully published.
+	AddProviderSuccess {
+		/// Query ID.
+		query_id: QueryId,
+		/// Provided key.
+		provided_key: RecordKey,
+	},
+
 	/// Query failed.
 	QueryFailed {
 		/// Query ID.
@@ -401,7 +419,10 @@
 	/// Publish value on the DHT using Kademlia `PUT_VALUE`.
 	pub async fn put_value(&mut self, key: KademliaKey, value: Vec<u8>) -> QueryId {
 		self.kademlia_handle
-			.put_record(Record::new(RecordKey::new(&key.to_vec()), value))
+			.put_record(
+				Record::new(RecordKey::new(&key.to_vec()), value),
+				Quorum::N(QUORUM_THRESHOLD),
+			)
 			.await
 	}
 
@@ -417,6 +438,9 @@
 				record,
 				peers.into_iter().map(|peer| peer.into()).collect(),
 				update_local_storage,
+				// These are the peers that just returned the record to us in authority-discovery,
+				// so we assume they are all reachable.
+				Quorum::All,
 			)
 			.await
 	}
@@ -446,8 +470,10 @@
 	}
 
 	/// Start providing `key`.
-	pub async fn start_providing(&mut self, key: KademliaKey) {
-		self.kademlia_handle.start_providing(key.into()).await;
+	pub async fn start_providing(&mut self, key: KademliaKey) -> QueryId {
+		self.kademlia_handle
+			.start_providing(key.into(), Quorum::N(QUORUM_THRESHOLD))
+			.await
 	}
 
 	/// Stop providing `key`.
@@ -680,6 +706,17 @@
 					providers,
 				}))
 			},
+			Poll::Ready(Some(KademliaEvent::AddProviderSuccess { query_id, provided_key })) => {
+				log::trace!(
+					target: LOG_TARGET,
+					"`ADD_PROVIDER` for {query_id:?} with {provided_key:?} succeeded",
+				);
+
+				return Poll::Ready(Some(DiscoveryEvent::AddProviderSuccess {
+					query_id,
+					provided_key,
+				}))
+			},
 			// We do not validate incoming providers.
 			Poll::Ready(Some(KademliaEvent::IncomingProvider { .. })) => {},
 		}
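Since `start_providing` now returns the `QueryId` and the stream surfaces `AddProviderSuccess`, a caller can correlate the two. A hedged sketch of that bookkeeping follows; the simplified type aliases and the `ProviderQueries` helper are assumptions for illustration, not the actual sc-network code.

```rust
use std::collections::HashMap;

// Simplified stand-ins for the types used in `discovery.rs` (illustrative).
type QueryId = u64;
type RecordKey = Vec<u8>;

enum DiscoveryEvent {
    /// Provider was successfully published.
    AddProviderSuccess { query_id: QueryId, provided_key: RecordKey },
    /// Query failed.
    QueryFailed { query_id: QueryId },
}

/// Hypothetical caller-side bookkeeping: remember which key each
/// `start_providing` query was for, then resolve it when the event arrives.
struct ProviderQueries {
    in_flight: HashMap<QueryId, RecordKey>,
}

impl ProviderQueries {
    /// Record the `QueryId` returned by `Discovery::start_providing`.
    fn on_started(&mut self, query_id: QueryId, key: RecordKey) {
        self.in_flight.insert(query_id, key);
    }

    /// Returns `Some((key, success))` once the query has finished.
    fn on_event(&mut self, event: DiscoveryEvent) -> Option<(RecordKey, bool)> {
        match event {
            DiscoveryEvent::AddProviderSuccess { query_id, provided_key } => {
                self.in_flight.remove(&query_id);
                Some((provided_key, true))
            },
            DiscoveryEvent::QueryFailed { query_id } =>
                self.in_flight.remove(&query_id).map(|key| (key, false)),
        }
    }
}
```

With litep2p before v0.12.0, `start_providing` returned nothing, so there was no query ID to correlate and no way to observe whether the provider was actually published.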
