Commit

deal with VidDisperseRecv in consensus task, add the condition that only vote after getting shares
dailinsubjam committed Nov 30, 2023
1 parent 389d4e2 commit 97aba4f
Showing 3 changed files with 66 additions and 41 deletions.
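Before the per-file diffs, a minimal sketch of the rule this commit introduces: a replica records each VID share it receives, keyed by view, and refuses to vote on a proposal until the share for that view has arrived. Everything below is an illustrative stand-in (plain u64 view numbers, byte-vector shares), not HotShot's actual types.

use std::collections::HashMap;

/// Illustrative stand-in for HotShot's view-number type.
type ViewNumber = u64;
/// Illustrative stand-in for a received VID share proposal.
type VidShare = Vec<u8>;

#[derive(Default)]
struct VoteGate {
    /// All the VID shares we've received, keyed by view (mirrors `vid_shares`).
    vid_shares: HashMap<ViewNumber, VidShare>,
}

impl VoteGate {
    /// Record a validated share (mirrors the new `VidDisperseRecv` arm).
    fn on_share(&mut self, view: ViewNumber, share: VidShare) {
        self.vid_shares.insert(view, share);
    }

    /// Only vote if we have seen the VID share for this view.
    fn may_vote(&self, proposal_view: ViewNumber) -> bool {
        self.vid_shares.contains_key(&proposal_view)
    }
}

The three files below wire exactly this up: the task state gains a vid_shares map, the consensus task fills it on VidDisperseRecv and consults it before voting, and the VID task stops voting on its own.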
2 changes: 1 addition & 1 deletion crates/hotshot/src/tasks/mod.rs
@@ -221,7 +221,7 @@ pub async fn add_consensus_task<TYPES: NodeType, I: NodeImplementation<TYPES>>(
event_stream: event_stream.clone(),
output_event_stream: output_stream,
da_certs: HashMap::new(),
vid_certs: HashMap::new(),
vid_shares: HashMap::new(),
current_proposal: None,
id: handle.hotshot.inner.id,
public_key: c_api.public_key().clone(),
69 changes: 61 additions & 8 deletions crates/task-impls/src/consensus.rs
@@ -17,10 +17,10 @@ use hotshot_task::{
};
use hotshot_types::{
consensus::{Consensus, View},
data::{Leaf, QuorumProposal, VidCommitment},
data::{Leaf, QuorumProposal, VidCommitment, VidDisperse},
event::{Event, EventType},
message::{GeneralConsensusMessage, Proposal},
simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate, VIDCertificate},
simple_certificate::{DACertificate, QuorumCertificate, TimeoutCertificate},
simple_vote::{QuorumData, QuorumVote, TimeoutData, TimeoutVote},
traits::{
block_contents::BlockHeader,
@@ -117,8 +117,8 @@ pub struct ConsensusTaskState<
/// All the DA certs we've received for current and future views.
pub da_certs: HashMap<TYPES::Time, DACertificate<TYPES>>,

/// All the VID certs we've received for current and future views.
pub vid_certs: HashMap<TYPES::Time, VIDCertificate<TYPES>>,
/// All the VID shares we've received for current and future views.
pub vid_shares: HashMap<TYPES::Time, Proposal<TYPES, VidDisperse<TYPES>>>,

/// The most recent proposal we have, will correspond to the current view if Some()
/// Will be none if the view advanced through timeout/view_sync
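A note on the field swap above: the map's values change from VIDCertificate<TYPES> to the full Proposal<TYPES, VidDisperse<TYPES>>. Judging by how the handler below uses it, a disperse proposal bundles the share data with the leader's signature, so storing the whole proposal keeps everything needed for later checks. A hedged sketch of that shape (field sets are guesses from the accesses in this diff, not HotShot's real definitions):

/// Assumed shape, inferred from `disperse.data.get_view_number()`,
/// `disperse.data.payload_commitment`, and `disperse.signature` below.
struct VidDisperse {
    view_number: u64,
    payload_commitment: [u8; 32],
    // ... plus the dispersed shares themselves
}

/// A payload plus the signature that vouches for it.
struct Proposal<D> {
    data: D,
    signature: Vec<u8>,
}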
@@ -365,9 +365,15 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, A: ConsensusApi<TYPES, I> +
}
}

// Only vote if we have seen the VID share for this view
if !self.vid_shares.contains_key(&proposal.view_number) {
error!("We have not seen the VID share for view {:?} yet, so we cannot vote.", proposal.view_number);
return false;
}

// Only vote if you have the DA cert
// ED Need to update the view number this is stored under?
if let Some(cert) = self.da_certs.get(&(proposal.get_view_number())) {
let view = cert.view_number;
// TODO: do some of this logic without the vote token check, only do that when voting.
@@ -1101,20 +1107,66 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, A: ConsensusApi<TYPES, I> +
self.current_proposal = None;
}
}
HotShotEvent::VidDisperseRecv(disperse, sender) => {
let view = disperse.data.get_view_number();

debug!("VID disperse received for view: {:?} in consensus task", view);

// stop polling for the received disperse
self.quorum_network
.inject_consensus_info(ConsensusIntentEvent::CancelPollForVIDDisperse(
*disperse.data.view_number,
))
.await;

// Allow VID disperse data that is one view older, in case we have updated the
// view.
// Adding `+ 1` on the LHS rather than `- 1` on the RHS avoids the overflow
// error from subtracting below the genesis view number.
if view + 1 < self.cur_view {
warn!("Throwing away VID disperse data that is more than one view older");
return;
}

debug!("VID disperse data is fresh.");
let payload_commitment = disperse.data.payload_commitment;

// Check whether the sender is the right leader for this view
let view_leader_key = self.committee_membership.get_leader(view);
if view_leader_key != sender {
error!("VID dispersal/share is not from expected leader key for view {} \n", *view);
return;
}

if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) {
error!("Could not verify VID dispersal/share sig.");
return;
}

// Add to the storage that we have received the VID disperse for a specific view
self.vid_shares.insert(view, disperse.clone());
}
HotShotEvent::VidCertRecv(cert) => {
debug!("VID cert received for view ! {}", *cert.view_number);

let view = cert.get_view_number();
self.vid_certs.insert(view, cert);
let _view = cert.get_view_number();
// Sishan NOTE TODO: delete it
// RM TODO: VOTING
}

HotShotEvent::ViewChange(new_view) => {
debug!("View Change event for view {}", *new_view);
debug!("View Change event for view {} in consensus task", *new_view);

let old_view_number = self.cur_view;

// Start polling for VID disperse for the new view
self.quorum_network
.inject_consensus_info(ConsensusIntentEvent::PollForVIDDisperse(
*old_view_number + 1,
))
.await;


// update the view in state to the one in the message
// Publish a view change event to the application
if !self.update_view(new_view).await {
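The freshness check in the VidDisperseRecv arm above deserves a worked example. View numbers are unsigned, so self.cur_view - 1 panics with an overflow error when cur_view is the genesis view; the rearranged predicate view + 1 < self.cur_view is equivalent and never subtracts. A self-contained sketch with plain u64s standing in for HotShot's view-number type:

/// True when `view` is more than one view older than `cur_view`.
/// Same predicate as `view < cur_view - 1`, but with no subtraction,
/// so it cannot underflow at the genesis view (0).
fn is_stale(view: u64, cur_view: u64) -> bool {
    view + 1 < cur_view
}

fn main() {
    assert!(!is_stale(5, 5)); // current view: fresh
    assert!(!is_stale(4, 5)); // one view old: still accepted
    assert!(is_stale(3, 5)); // two views old: thrown away
    assert!(!is_stale(0, 0)); // genesis: no panic, unlike `cur_view - 1`
}

The ViewChange arm pairs with this: as soon as the view advances, the task starts polling for the next view's disperse (*old_view_number + 1), so the share is usually on hand before the proposal arrives.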
@@ -1340,6 +1392,7 @@ pub fn consensus_event_filter<TYPES: NodeType>(event: &HotShotEvent<TYPES>) -> bool
| HotShotEvent::SendPayloadCommitmentAndMetadata(_, _)
| HotShotEvent::Timeout(_)
| HotShotEvent::TimeoutVoteRecv(_)
| HotShotEvent::VidDisperseRecv(_, _)
| HotShotEvent::Shutdown,
)
}
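For context, consensus_event_filter is a whitelist: the consensus task only sees event variants the filter matches, and this hunk adds VidDisperseRecv to the list. A simplified sketch of the same pattern, with a toy enum standing in for HotShotEvent:

/// Toy stand-in for `HotShotEvent`, reduced to a few variants.
enum Event {
    QuorumProposalRecv,
    Timeout,
    TimeoutVoteRecv,
    VidDisperseRecv,
    Shutdown,
    Unrelated,
}

/// Whitelist filter in the style of `consensus_event_filter`.
fn consensus_filter(event: &Event) -> bool {
    matches!(
        event,
        Event::QuorumProposalRecv
            | Event::Timeout
            | Event::TimeoutVoteRecv
            | Event::VidDisperseRecv
            | Event::Shutdown
    )
}

fn main() {
    assert!(consensus_filter(&Event::VidDisperseRecv)); // now delivered to consensus
    assert!(!consensus_filter(&Event::Unrelated));
}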
36 changes: 4 additions & 32 deletions crates/task-impls/src/vid.rs
@@ -24,7 +24,7 @@ use hotshot_types::{
};
use hotshot_types::{
simple_certificate::VIDCertificate,
simple_vote::{VIDData, VIDVote},
simple_vote::VIDVote,
traits::network::CommunicationChannel,
vote::{HasViewNumber, VoteAccumulator},
};
@@ -243,7 +243,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, A: ConsensusApi<TYPES, I> +
HotShotEvent::VidDisperseRecv(disperse, sender) => {
let view = disperse.data.get_view_number();

debug!("VID disperse received for view: {:?}", view);
debug!("VID disperse received for view: {:?} in VID task", view);

// stop polling for the received disperse
self.network
@@ -263,47 +263,19 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, A: ConsensusApi<TYPES, I> +

debug!("VID disperse data is fresh.");
let payload_commitment = disperse.data.payload_commitment;
// Sishan NOTE TODO: Add to the storage that we have received the VID disperse for a specific view / block

// Check whether the sender is the right leader for this view
let view_leader_key = self.membership.get_leader(view);
if view_leader_key != sender {
error!("VID proposal doesn't have expected leader key for view {} \n DA proposal is: [N/A for VID]", *view);
error!("VID dispersal/share is not from expected leader key for view {} \n", *view);
return None;
}

if !view_leader_key.validate(&disperse.signature, payload_commitment.as_ref()) {
error!("Could not verify VID proposal sig.");
error!("Could not verify VID dispersal/share sig.");
return None;
}

// Generate and send vote after receive and validate disperse (VID share)
let vote = VIDVote::create_signed_vote(
VIDData {
payload_commit: payload_commitment,
},
view,
&self.public_key,
&self.private_key,
);

debug!(
"Sending vote to the VID leader {:?}",
vote.get_view_number()
);
self.event_stream
.publish(HotShotEvent::VidVoteSend(vote))
.await;

// Sishan NOTE TODO: what is consensus.state_map?
// let mut consensus = self.consensus.write().await;
// // Ensure this view is in the view map for garbage collection, but do not overwrite if
// // there is already a view there: the replica task may have inserted a `Leaf` view which
// // contains strictly more information.
// consensus.state_map.entry(view).or_insert(View {
// view_inner: ViewInner::DA { payload_commitment, },
// });

// Record the block we have promised to make available.
// TODO https://github.com/EspressoSystems/HotShot/issues/1692
// consensus.saved_payloads.insert(proposal.data.block_payload);
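
Note that both tasks now run the same two checks on a received disperse: the sender must be the view leader, and the leader's signature over the payload commitment must verify. A hedged sketch of that shared shape, with a minimal trait standing in for HotShot's signature-key type:

/// Minimal stand-in for HotShot's signature-key abstraction.
trait SignatureKey: PartialEq {
    fn validate(&self, signature: &[u8], data: &[u8]) -> bool;
}

/// The two checks both the VID task and the consensus task perform.
fn validate_disperse<K: SignatureKey>(
    sender: &K,
    view_leader_key: &K,
    signature: &[u8],
    payload_commitment: &[u8],
) -> bool {
    // "VID dispersal/share is not from expected leader key for view ..."
    if view_leader_key != sender {
        return false;
    }
    // On failure: "Could not verify VID dispersal/share sig."
    view_leader_key.validate(signature, payload_commitment)
}

After this commit the VID task still validates the disperse but no longer votes on it; the vote decision lives with the consensus task's vid_shares check.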
