diff --git a/Cargo.lock b/Cargo.lock
index 1cb7767d15..7b0358f5c0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5594,6 +5594,7 @@ dependencies = [
  "ark-serialize 0.4.2",
  "ark-srs",
  "ark-std 0.4.0",
+ "async-broadcast",
  "async-lock 3.4.0",
  "async-trait",
  "bincode",
diff --git a/hotshot-examples/infra/mod.rs b/hotshot-examples/infra/mod.rs
index c125bd371f..875d849678 100755
--- a/hotshot-examples/infra/mod.rs
+++ b/hotshot-examples/infra/mod.rs
@@ -54,6 +54,7 @@ use hotshot_testing::block_builder::{
 use hotshot_types::{
     consensus::ConsensusMetricsValue,
     data::{Leaf, TestableLeaf},
+    epoch_membership::EpochMembershipCoordinator,
     event::{Event, EventType},
     network::{BuilderType, NetworkConfig, NetworkConfigFile, NetworkConfigSource},
     traits::{
@@ -388,13 +389,14 @@ pub trait RunDa<
             // TODO: we need to pass a valid fallback builder url here somehow
             fallback_builder_url: config.config.builder_urls.first().clone(),
         };
+        let epoch_height = config.config.epoch_height;

         SystemContext::init(
             pk,
             sk,
             config.node_index,
             config.config,
-            membership,
+            EpochMembershipCoordinator::new(membership, epoch_height),
             Arc::from(network),
             initializer,
             ConsensusMetricsValue::default(),
@@ -524,15 +526,15 @@ pub trait RunDa<
                 }
             }
         }
+        // Panic if we don't have the genesis epoch, there is no recovery from that
         let num_eligible_leaders = context
             .hotshot
-            .memberships
-            .read()
+            .membership_coordinator
+            .membership_for_epoch(genesis_epoch_from_version::<V, TYPES>())
+            .await
+            .unwrap()
+            .committee_leaders(TYPES::View::genesis())
             .await
-            .committee_leaders(
-                TYPES::View::genesis(),
-                genesis_epoch_from_version::<V, TYPES>(),
-            )
             .len();
         let consensus_lock = context.hotshot.consensus();
         let consensus_reader = consensus_lock.read().await;
diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs
index a4e2ebd36c..847f8c60bc 100644
--- a/hotshot-query-service/examples/simple-server.rs
+++ b/hotshot-query-service/examples/simple-server.rs
@@ -42,6 +42,7 @@ use hotshot_query_service::{
 use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation};
 use hotshot_types::{
     consensus::ConsensusMetricsValue,
+    epoch_membership::EpochMembershipCoordinator,
     light_client::StateKeyPair,
     signature_key::BLSPubKey,
     traits::{election::Membership, network::Topic},
@@ -236,13 +237,17 @@ async fn init_consensus(
     ));
     let storage: TestStorage<MockTypes> = TestStorage::default();

+    let coordinator = EpochMembershipCoordinator::new(
+        Arc::new(RwLock::new(membership)),
+        config.epoch_height,
+    );
     SystemContext::init(
         pub_keys[node_id],
         priv_key,
         node_id as u64,
         config,
-        Arc::new(RwLock::new(membership)),
+        coordinator,
         network,
         HotShotInitializer::from_genesis::<MockVersions>(
             TestInstanceState::default(),
diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs
index 88a146aa79..28236ad6bf 100644
--- a/hotshot-query-service/src/testing/consensus.rs
+++ b/hotshot-query-service/src/testing/consensus.rs
@@ -38,6 +38,7 @@ use hotshot_example_types::{
 use hotshot_testing::block_builder::{SimpleBuilderImplementation, TestBuilderImplementation};
 use hotshot_types::{
     consensus::ConsensusMetricsValue,
+    epoch_membership::EpochMembershipCoordinator,
     light_client::StateKeyPair,
     signature_key::BLSPubKey,
     traits::{election::Membership, network::Topic, signature_key::SignatureKey as _},
@@ -181,13 +182,15 @@ impl MockNetwork {
         ));
         let hs_storage: TestStorage<MockTypes> = TestStorage::default();

+        let memberships =
+            EpochMembershipCoordinator::new(membership, config.epoch_height);
         let hotshot = SystemContext::init(
             pub_keys[node_id],
             priv_key,
             node_id as u64,
             config,
-            membership,
+            memberships,
             network,
             HotShotInitializer::from_genesis::<MockVersions>(
                 TestInstanceState::default(),
diff --git a/hotshot-task-impls/src/consensus/handlers.rs b/hotshot-task-impls/src/consensus/handlers.rs
index 64b8e19960..e33b2596ee 100644
--- a/hotshot-task-impls/src/consensus/handlers.rs
+++ b/hotshot-task-impls/src/consensus/handlers.rs
@@ -11,10 +11,7 @@ use chrono::Utc;
 use hotshot_types::{
     event::{Event, EventType},
     simple_vote::{HasEpoch, QuorumVote2, TimeoutData2, TimeoutVote2},
-    traits::{
-        election::Membership,
-        node_implementation::{ConsensusTime, NodeImplementation, NodeType},
-    },
+    traits::node_implementation::{ConsensusTime, NodeImplementation, NodeType},
     utils::EpochTransitionIndicator,
     vote::{HasViewNumber, Vote},
 };
@@ -47,12 +44,14 @@ pub(crate) async fn handle_quorum_vote_recv<
         .read()
         .await
         .is_high_qc_for_last_block();
-    let we_are_leader = task_state
-        .membership
-        .read()
+    let epoch_membership = task_state
+        .membership_coordinator
+        .membership_for_epoch(vote.data.epoch)
         .await
-        .leader(vote.view_number() + 1, vote.data.epoch)?
-        == task_state.public_key;
+        .context(warn!("No stake table for epoch"))?;
+
+    let we_are_leader =
+        epoch_membership.leader(vote.view_number() + 1).await? == task_state.public_key;
     ensure!(
         in_transition || we_are_leader,
         info!(
@@ -70,8 +69,7 @@ pub(crate) async fn handle_quorum_vote_recv<
         &mut task_state.vote_collectors,
         vote,
         task_state.public_key.clone(),
-        &task_state.membership,
-        vote.data.epoch,
+        &epoch_membership,
         task_state.id,
         &event,
         sender,
@@ -80,20 +78,19 @@ pub(crate) async fn handle_quorum_vote_recv<
     )
     .await?;

-    if let Some(vote_epoch) = vote.epoch() {
+    if vote.epoch().is_some() {
         // If the vote sender belongs to the next epoch, collect it separately to form the second QC
-        let has_stake = task_state
-            .membership
-            .read()
-            .await
-            .has_stake(&vote.signing_key(), Some(vote_epoch + 1));
+        let has_stake = epoch_membership
+            .next_epoch()
+            .await?
+            .has_stake(&vote.signing_key())
+            .await;
         if has_stake {
             handle_vote(
                 &mut task_state.next_epoch_vote_collectors,
                 &vote.clone().into(),
                 task_state.public_key.clone(),
-                &task_state.membership,
-                vote.data.epoch,
+                &epoch_membership.next_epoch().await?.clone(),
                 task_state.id,
                 &event,
                 sender,
@@ -118,14 +115,14 @@ pub(crate) async fn handle_timeout_vote_recv<
     sender: &Sender<Arc<HotShotEvent<TYPES>>>,
     task_state: &mut ConsensusTaskState<TYPES, I, V>,
 ) -> Result<()> {
+    let epoch_membership = task_state
+        .membership_coordinator
+        .membership_for_epoch(task_state.cur_epoch)
+        .await
+        .context(warn!("No stake table for epoch"))?;
     // Are we the leader for this view?
     ensure!(
-        task_state
-            .membership
-            .read()
-            .await
-            .leader(vote.view_number() + 1, task_state.cur_epoch)?
-            == task_state.public_key,
+        epoch_membership.leader(vote.view_number() + 1).await? == task_state.public_key,
         info!(
             "We are not the leader for view {:?}",
             vote.view_number() + 1
@@ -136,8 +133,10 @@ pub(crate) async fn handle_timeout_vote_recv<
         &mut task_state.timeout_vote_collectors,
         vote,
         task_state.public_key.clone(),
-        &task_state.membership,
-        vote.data.epoch,
+        &task_state
+            .membership_coordinator
+            .membership_for_epoch(vote.data.epoch)
+            .await?,
         task_state.id,
         &event,
         sender,
@@ -201,10 +200,11 @@ pub async fn send_high_qc
     ensure!(
         task_state
-            .membership
-            .read()
+            .membership_coordinator
+            .membership_for_epoch(epoch)
             .await
-            .has_stake(&task_state.public_key, epoch),
+            .context(warn!("No stake table for epoch"))?
+            .has_stake(&task_state.public_key)
+            .await,
         debug!(
             "We were not chosen for the consensus committee for view {:?}",
             view_number
@@ -416,10 +420,12 @@ pub(crate) async fn handle_timeout
         .await;

     let leader = task_state
-        .membership
-        .read()
+        .membership_coordinator
+        .membership_for_epoch(task_state.cur_epoch)
         .await
-        .leader(view_number, task_state.cur_epoch);
+        .context(warn!("No stake table for epoch"))?
+        .leader(view_number)
+        .await;

     let consensus_reader = task_state.consensus.read().await;
     consensus_reader.metrics.number_of_timeouts.add(1);
diff --git a/hotshot-task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs
index 202e174152..6e303ee4fd 100644
--- a/hotshot-task-impls/src/consensus/mod.rs
+++ b/hotshot-task-impls/src/consensus/mod.rs
@@ -5,11 +5,11 @@
 // along with the HotShot repository. If not, see <https://mit-license.org/>.

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use async_trait::async_trait;
 use hotshot_task::task::TaskState;
 use hotshot_types::{
     consensus::OuterConsensus,
+    epoch_membership::EpochMembershipCoordinator,
     event::Event,
     message::UpgradeLock,
     simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, TimeoutCertificate2},
@@ -53,7 +53,7 @@ pub struct ConsensusTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V:
     pub network: Arc<I::Network>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// A map of `QuorumVote` collector tasks.
     pub vote_collectors: VoteCollectorsMap<TYPES, QuorumVote2<TYPES>, QuorumCertificate2<TYPES>, V>,
@@ -183,7 +183,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> ConsensusTaskSt
                     high_qc,
                     Some(next_epoch_high_qc),
                     &self.consensus,
-                    &self.membership,
+                    &self.membership_coordinator,
                     &self.upgrade_lock,
                 )
                 .await
diff --git a/hotshot-task-impls/src/da.rs b/hotshot-task-impls/src/da.rs
index 9ce863d193..f8bf3add36 100644
--- a/hotshot-task-impls/src/da.rs
+++ b/hotshot-task-impls/src/da.rs
@@ -10,15 +10,15 @@ use async_broadcast::{Receiver, Sender};
 use async_lock::RwLock;
 use async_trait::async_trait;
 use hotshot_task::task::TaskState;
+use hotshot_types::epoch_membership::EpochMembershipCoordinator;
 use hotshot_types::{
     consensus::{Consensus, OuterConsensus, PayloadWithMetadata},
     data::{vid_commitment, DaProposal2, PackedBundle},
     event::{Event, EventType},
     message::{Proposal, UpgradeLock},
     simple_certificate::DaCertificate2,
-    simple_vote::{DaData2, DaVote2, HasEpoch},
+    simple_vote::{DaData2, DaVote2},
     traits::{
-        election::Membership,
         network::ConnectedNetwork,
         node_implementation::{NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
@@ -56,7 +56,7 @@ pub struct DaTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> {
     /// Membership for the DA committee and quorum committee.
     /// We need the latter only for calculating the proper VID scheme
     /// from the number of nodes in the quorum.
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// The underlying network
     pub network: Arc<I::Network>,
@@ -116,10 +116,12 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> DaTaskState
                 (
                     OuterConsensus::new(Arc::clone(&consensus.inner_consensus)),
                     view_number,
                     target_epoch,
-                    membership,
+                    membership.coordinator.clone(),
                     &pk,
                     &upgrade_lock,
                 )
                 .await
@@ -316,24 +321,26 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> DaTaskState
diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs
--- a/hotshot-task-impls/src/helpers.rs
+++ b/hotshot-task-impls/src/helpers.rs
 pub(crate) async fn fetch_proposal<TYPES: NodeType, V: Versions>(
     view_number: TYPES::View,
     event_sender: Sender<Arc<HotShotEvent<TYPES>>>,
     event_receiver: Receiver<Arc<HotShotEvent<TYPES>>>,
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership_coordinator: EpochMembershipCoordinator<TYPES>,
     consensus: OuterConsensus<TYPES>,
     sender_public_key: TYPES::SignatureKey,
     sender_private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
@@ -78,7 +79,7 @@ pub(crate) async fn fetch_proposal(
     )
     .await;

-    let mem = Arc::clone(&membership);
+    let mem_coordinator = membership_coordinator.clone();
     // Make a background task to await the arrival of the event data.
     let Ok(Some(proposal)) =
         // We want to explicitly timeout here so we aren't waiting around for the data.
@@ -109,9 +110,14 @@ pub(crate) async fn fetch_proposal(
         if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref()
         {
+            let proposal_epoch = option_epoch_from_block_number::<TYPES>(
+                quorum_proposal.data.proposal.epoch().is_some(),
+                quorum_proposal.data.block_header().block_number(),
+                epoch_height,
+            );
+            let epoch_membership = mem_coordinator.membership_for_epoch(proposal_epoch).await.ok()?;
             // Make sure that the quorum_proposal is valid
-            let mem_reader = mem.read().await;
-            if quorum_proposal.validate_signature(&mem_reader, epoch_height).is_ok() {
+            if quorum_proposal.validate_signature(&epoch_membership).await.is_ok() {
                 proposal = Some(quorum_proposal.clone());
             }
@@ -133,10 +139,11 @@ pub(crate) async fn fetch_proposal(
     let justify_qc_epoch = justify_qc.data.epoch();

-    let membership_reader = membership.read().await;
-    let membership_stake_table = membership_reader.stake_table(justify_qc_epoch);
-    let membership_success_threshold = membership_reader.success_threshold(justify_qc_epoch);
-    drop(membership_reader);
+    let epoch_membership = membership_coordinator
+        .membership_for_epoch(justify_qc_epoch)
+        .await?;
+    let membership_stake_table = epoch_membership.stake_table().await;
+    let membership_success_threshold = epoch_membership.success_threshold().await;

     justify_qc
         .is_valid_cert(
@@ -498,7 +505,7 @@ pub async fn decide_from_proposal(
 pub(crate) async fn parent_leaf_and_state(
     event_sender: &Sender<Arc<HotShotEvent<TYPES>>>,
     event_receiver: &Receiver<Arc<HotShotEvent<TYPES>>>,
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership: EpochMembershipCoordinator<TYPES>,
     public_key: TYPES::SignatureKey,
     private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
     consensus: OuterConsensus<TYPES>,
@@ -745,9 +752,8 @@ pub(crate) async fn validate_proposal_view_and_certs<
     );

     // Validate the proposal's signature. This should also catch if the leaf_commitment does not equal our calculated parent commitment
-    let membership_reader = validation_info.membership.read().await;
-    proposal.validate_signature(&membership_reader, validation_info.epoch_height)?;
-    drop(membership_reader);
+    let mut membership = validation_info.membership.clone();
+    proposal.validate_signature(&membership).await?;

     // Verify a timeout certificate OR a view sync certificate exists and is valid.
     if proposal.data.justify_qc().view_number() != view_number - 1 {
@@ -765,12 +771,10 @@ pub(crate) async fn validate_proposal_view_and_certs<
                 *view_number
             );
             let timeout_cert_epoch = timeout_cert.data().epoch();
+            membership = membership.get_new_epoch(timeout_cert_epoch).await?;

-            let membership_reader = validation_info.membership.read().await;
-            let membership_stake_table = membership_reader.stake_table(timeout_cert_epoch);
-            let membership_success_threshold =
-                membership_reader.success_threshold(timeout_cert_epoch);
-            drop(membership_reader);
+            let membership_stake_table = membership.stake_table().await;
+            let membership_success_threshold = membership.success_threshold().await;

             timeout_cert
                 .is_valid_cert(
@@ -795,12 +799,10 @@ pub(crate) async fn validate_proposal_view_and_certs<
             );
             let view_sync_cert_epoch = view_sync_cert.data().epoch();
+            membership = membership.get_new_epoch(view_sync_cert_epoch).await?;

-            let membership_reader = validation_info.membership.read().await;
-            let membership_stake_table = membership_reader.stake_table(view_sync_cert_epoch);
-            let membership_success_threshold =
-                membership_reader.success_threshold(view_sync_cert_epoch);
-            drop(membership_reader);
+            let membership_stake_table = membership.stake_table().await;
+            let membership_success_threshold = membership.success_threshold().await;

             // View sync certs must also be valid.
             view_sync_cert
@@ -922,13 +924,15 @@ pub async fn validate_qc_and_next_epoch_qc(
     qc: &QuorumCertificate2<TYPES>,
     maybe_next_epoch_qc: Option<&NextEpochQuorumCertificate2<TYPES>>,
     consensus: &OuterConsensus<TYPES>,
-    membership: &Arc<RwLock<TYPES::Membership>>,
+    membership_coordinator: &EpochMembershipCoordinator<TYPES>,
     upgrade_lock: &UpgradeLock<TYPES, V>,
 ) -> Result<()> {
-    let membership_reader = membership.read().await;
-    let membership_stake_table = membership_reader.stake_table(qc.data.epoch);
-    let membership_success_threshold = membership_reader.success_threshold(qc.data.epoch);
-    drop(membership_reader);
+    let mut epoch_membership = membership_coordinator
+        .membership_for_epoch(qc.data.epoch)
+        .await?;
+
+    let membership_stake_table = epoch_membership.stake_table().await;
+    let membership_success_threshold = epoch_membership.success_threshold().await;

     {
         let consensus_reader = consensus.read().await;
@@ -950,13 +954,9 @@ pub async fn validate_qc_and_next_epoch_qc(
         if qc.view_number() != next_epoch_qc.view_number() || qc.data != *next_epoch_qc.data {
             bail!("Next epoch qc exists but it's not equal with qc.");
         }
-
-        let membership_reader = membership.read().await;
-        let membership_next_stake_table =
-            membership_reader.stake_table(qc.data.epoch.map(|x| x + 1));
-        let membership_next_success_threshold =
-            membership_reader.success_threshold(qc.data.epoch.map(|x| x + 1));
-        drop(membership_reader);
+        epoch_membership = epoch_membership.next_epoch().await?;
+        let membership_next_stake_table = epoch_membership.stake_table().await;
+        let membership_next_success_threshold = epoch_membership.success_threshold().await;

         // Validate the next epoch qc as well
         next_epoch_qc
diff --git a/hotshot-task-impls/src/network.rs b/hotshot-task-impls/src/network.rs
index 03004f5077..8994a594fb 100644
--- a/hotshot-task-impls/src/network.rs
+++ b/hotshot-task-impls/src/network.rs
@@ -17,6 +17,7 @@ use hotshot_task::task::TaskState;
 use hotshot_types::{
     consensus::OuterConsensus,
     data::{VidDisperse, VidDisperseShare},
+    epoch_membership::EpochMembershipCoordinator,
     event::{Event, EventType, HotShotAction},
     message::{
         convert_proposal, DaConsensusMessage, DataMessage, GeneralConsensusMessage, Message,
@@ -24,7 +25,6 @@ use hotshot_types::{
     },
     simple_vote::HasEpoch,
     traits::{
-        election::Membership,
         network::{
             BroadcastDelay, ConnectedNetwork, RequestKind, ResponseMessage, Topic, TransmitType,
             ViewMessage,
@@ -481,7 +481,7 @@ pub struct NetworkEventTaskState<
     pub epoch: Option<TYPES::Epoch>,

     /// network memberships
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// Storage to store actionable events
     pub storage: Arc<RwLock<S>>,
@@ -725,10 +725,12 @@ impl<
                 *maybe_action = Some(HotShotAction::Vote);
                 let view_number = vote.view_number() + 1;
                 let leader = match self
-                    .membership
-                    .read()
+                    .membership_coordinator
+                    .membership_for_epoch(vote.epoch())
+                    .await
+                    .ok()?
+                    .leader(view_number)
                     .await
-                    .leader(view_number, vote.epoch())
                 {
                     Ok(l) => l,
                     Err(e) => {
@@ -821,8 +823,14 @@ impl<
             HotShotEvent::DaVoteSend(vote) => {
                 *maybe_action = Some(HotShotAction::DaVote);
                 let view_number = vote.view_number();
-                let epoch = vote.data.epoch;
-                let leader = match self.membership.read().await.leader(view_number, epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(vote.epoch())
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -866,7 +874,14 @@ impl<
             }
             HotShotEvent::ViewSyncPreCommitVoteSend(vote) => {
                 let view_number = vote.view_number() + vote.date().relay;
-                let leader = match self.membership.read().await.leader(view_number, self.epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(self.epoch)
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -892,7 +907,14 @@ impl<
             HotShotEvent::ViewSyncCommitVoteSend(vote) => {
                 *maybe_action = Some(HotShotAction::ViewSyncVote);
                 let view_number = vote.view_number() + vote.date().relay;
-                let leader = match self.membership.read().await.leader(view_number, self.epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(self.epoch)
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -918,7 +940,14 @@ impl<
             HotShotEvent::ViewSyncFinalizeVoteSend(vote) => {
                 *maybe_action = Some(HotShotAction::ViewSyncVote);
                 let view_number = vote.view_number() + vote.date().relay;
-                let leader = match self.membership.read().await.leader(view_number, self.epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(self.epoch)
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -986,7 +1015,14 @@ impl<
             HotShotEvent::TimeoutVoteSend(vote) => {
                 *maybe_action = Some(HotShotAction::Vote);
                 let view_number = vote.view_number() + 1;
-                let leader = match self.membership.read().await.leader(view_number, self.epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(self.epoch)
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -1019,7 +1055,14 @@ impl<
             HotShotEvent::UpgradeVoteSend(vote) => {
                 tracing::error!("Sending upgrade vote!");
                 let view_number = vote.view_number();
-                let leader = match self.membership.read().await.leader(view_number, self.epoch) {
+                let leader = match self
+                    .membership_coordinator
+                    .membership_for_epoch(self.epoch)
+                    .await
+                    .ok()?
+                    .leader(view_number)
+                    .await
+                {
                     Ok(l) => l,
                     Err(e) => {
                         tracing::warn!(
@@ -1047,9 +1090,10 @@ impl<
                 self.cancel_tasks(keep_view);
                 let net = Arc::clone(&self.network);
                 let epoch = self.epoch.map(|x| x.u64());
-                let mem = Arc::clone(&self.membership);
+                let membership_coordinator = self.membership_coordinator.clone();
                 spawn(async move {
-                    net.update_view::<TYPES>(*keep_view, epoch, mem).await;
+                    net.update_view::<TYPES>(*keep_view, epoch, membership_coordinator)
+                        .await;
                 });
                 None
             }
@@ -1145,11 +1189,14 @@ impl<
         let view_number = message.kind.view_number();
         let epoch = message.kind.epoch();
         let committee_topic = Topic::Global;
-        let da_committee = self
-            .membership
-            .read()
+        let Ok(mem) = self
+            .membership_coordinator
+            .membership_for_epoch(self.epoch)
             .await
-            .da_committee_members(view_number, self.epoch);
+        else {
+            return;
+        };
+        let da_committee = mem.da_committee_members(view_number).await;
         let network = Arc::clone(&self.network);
         let storage = Arc::clone(&self.storage);
         let consensus = OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus));
@@ -1271,14 +1318,12 @@ pub mod test {
                 self.parse_event(event, &mut maybe_action).await
             {
                 // Modify the values acquired by parsing the event.
-                let membership_reader = self.membership.read().await;
                 (self.modifier)(
                     &mut sender,
                     &mut message_kind,
                     &mut transmit,
-                    &membership_reader,
+                    &*self.membership_coordinator.membership().read().await,
                 );
-                drop(membership_reader);

                 self.spawn_transmit_task(message_kind, maybe_action, transmit, sender)
                     .await;
diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs
index fc5577c912..2203370d3d 100644
--- a/hotshot-task-impls/src/quorum_proposal/handlers.rs
+++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs
@@ -26,11 +26,11 @@ use hotshot_task::dependency_task::HandleDepOutput;
 use hotshot_types::{
     consensus::{CommitmentAndMetadata, OuterConsensus},
     data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, ViewChangeEvidence2},
+    epoch_membership::EpochMembership,
     message::Proposal,
     simple_certificate::{QuorumCertificate2, UpgradeCertificate},
     traits::{
         block_contents::BlockHeader,
-        election::Membership,
         node_implementation::{NodeImplementation, NodeType},
         signature_key::SignatureKey,
     },
@@ -82,7 +82,7 @@ pub struct ProposalDependencyHandle<TYPES: NodeType, V: Versions> {
     pub instance_state: Arc<TYPES::InstanceState>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembership<TYPES>,

     /// Our public key
     pub public_key: TYPES::SignatureKey,
@@ -128,11 +128,10 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
     ) -> Option<QuorumCertificate2<TYPES>> {
         while let Ok(event) = rx.recv_direct().await {
             if let HotShotEvent::HighQcRecv(qc, _sender) = event.as_ref() {
-                let membership_reader = self.membership.read().await;
-                let membership_stake_table = membership_reader.stake_table(qc.data.epoch);
-                let membership_success_threshold =
-                    membership_reader.success_threshold(qc.data.epoch);
-                drop(membership_reader);
+                let prev_epoch = qc.data.epoch;
+                let epoch_membership = self.membership.get_new_epoch(prev_epoch).await.ok()?;
+                let membership_stake_table = epoch_membership.stake_table().await;
+                let membership_success_threshold = epoch_membership.success_threshold().await;

                 if qc
                     .is_valid_cert(
@@ -208,7 +207,7 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
         let (parent_leaf, state) = parent_leaf_and_state(
             &self.sender,
             &self.receiver,
-            Arc::clone(&self.membership),
+            self.membership.coordinator.clone(),
             self.public_key.clone(),
             self.private_key.clone(),
             OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)),
@@ -309,16 +308,15 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
             self.epoch_height,
         );

+        let epoch_membership = self
+            .membership
+            .coordinator
+            .membership_for_epoch(epoch)
+            .await?;
         // Make sure we are the leader for the view and epoch.
         // We might have ended up here because we were in the epoch transition.
-        if self
-            .membership
-            .read()
-            .await
-            .leader(self.view_number, epoch)?
-            != self.public_key
-        {
-            tracing::debug!(
+        if epoch_membership.leader(self.view_number).await? != self.public_key {
+            tracing::warn!(
                 "We are not the leader in the epoch for which we are about to propose. Do not send the quorum proposal."
             );
             return Ok(());
diff --git a/hotshot-task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs
index bfbebe5a57..88c09de09e 100644
--- a/hotshot-task-impls/src/quorum_proposal/mod.rs
+++ b/hotshot-task-impls/src/quorum_proposal/mod.rs
@@ -18,10 +18,10 @@ use hotshot_task::{
 use hotshot_types::StakeTableEntries;
 use hotshot_types::{
     consensus::OuterConsensus,
+    epoch_membership::EpochMembershipCoordinator,
     message::UpgradeLock,
     simple_certificate::{QuorumCertificate2, UpgradeCertificate},
     traits::{
-        election::Membership,
         node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
         storage::Storage,
@@ -54,7 +54,7 @@ pub struct QuorumProposalTaskState<TYPES: NodeType, I: NodeImplementation<TYPES
     pub instance_state: Arc<TYPES::InstanceState>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// Our public key
     pub public_key: TYPES::SignatureKey,
@@ -282,9 +282,12 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
         event: Arc<HotShotEvent<TYPES>>,
         epoch_transition_indicator: EpochTransitionIndicator,
     ) -> Result<()> {
-        let membership_reader = self.membership.read().await;
+        let epoch_membership = self
+            .membership_coordinator
+            .membership_for_epoch(epoch_number)
+            .await?;
         let leader_in_current_epoch =
-            membership_reader.leader(view_number, epoch_number)? == self.public_key;
+            epoch_membership.leader(view_number).await? == self.public_key;

         // If we are in the epoch transition and we are the leader in the next epoch,
         // we might want to start collecting dependencies for our next epoch proposal.
@@ -293,9 +296,16 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
             epoch_transition_indicator,
             EpochTransitionIndicator::InTransition
         )
-            && membership_reader.leader(view_number, epoch_number.map(|x| x + 1))?
+            && epoch_membership
+                .next_epoch()
+                .await
+                .context(warn!(
+                    "No Stake Table for Epoch = {:?}",
+                    epoch_number.unwrap() + 1
+                ))?
+                .leader(view_number)
+                .await?
                 == self.public_key;
-        drop(membership_reader);

         // Don't even bother making the task if we are not entitled to propose anyway.
         ensure!(
@@ -328,7 +338,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
             view_number,
             sender: event_sender,
             receiver: event_receiver,
-            membership: Arc::clone(&self.membership),
+            membership: epoch_membership,
             public_key: self.public_key.clone(),
             private_key: self.private_key.clone(),
             instance_state: Arc::clone(&self.instance_state),
@@ -476,12 +486,14 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
             }
             HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => {
                 let epoch_number = certificate.data.epoch;
+                let epoch_membership = self
+                    .membership_coordinator
+                    .membership_for_epoch(epoch_number)
+                    .await
+                    .context(warn!("No Stake Table for Epoch = {:?}", epoch_number))?;

-                let membership_reader = self.membership.read().await;
-                let membership_stake_table = membership_reader.stake_table(epoch_number);
-                let membership_success_threshold =
-                    membership_reader.success_threshold(epoch_number);
-                drop(membership_reader);
+                let membership_stake_table = epoch_membership.stake_table().await;
+                let membership_success_threshold = epoch_membership.success_threshold().await;

                 certificate
                     .is_valid_cert(
@@ -562,11 +574,12 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                 ensure!(qc.view_number() > self.highest_qc.view_number());
                 let cert_epoch_number = qc.data.epoch;

-                let membership_reader = self.membership.read().await;
-                let membership_stake_table = membership_reader.stake_table(cert_epoch_number);
-                let membership_success_threshold =
-                    membership_reader.success_threshold(cert_epoch_number);
-                drop(membership_reader);
+                let epoch_membership = self
+                    .membership_coordinator
+                    .membership_for_epoch(cert_epoch_number)
+                    .await?;
+                let membership_stake_table = epoch_membership.stake_table().await;
+                let membership_success_threshold = epoch_membership.success_threshold().await;

                 qc.is_valid_cert(
                     StakeTableEntries::<TYPES>::from(membership_stake_table).0,
diff --git a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs
index a70fc30c96..f965bbfada 100644
--- a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs
+++ b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs
@@ -14,6 +14,7 @@ use committable::Committable;
 use hotshot_types::{
     consensus::OuterConsensus,
     data::{Leaf2, QuorumProposal, QuorumProposalWrapper},
+    epoch_membership::EpochMembershipCoordinator,
     message::Proposal,
     simple_certificate::QuorumCertificate,
     simple_vote::HasEpoch,
@@ -102,7 +103,7 @@ fn spawn_fetch_proposal<TYPES: NodeType, V: Versions>(
     view: TYPES::View,
     event_sender: Sender<Arc<HotShotEvent<TYPES>>>,
     event_receiver: Receiver<Arc<HotShotEvent<TYPES>>>,
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership: EpochMembershipCoordinator<TYPES>,
     consensus: OuterConsensus<TYPES>,
     sender_public_key: TYPES::SignatureKey,
     sender_private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
@@ -173,7 +174,7 @@ pub(crate) async fn handle_quorum_proposal_recv<
         &justify_qc,
         maybe_next_epoch_justify_qc.as_ref(),
         &validation_info.consensus,
-        &validation_info.membership,
+        &validation_info.membership.coordinator,
         &validation_info.upgrade_lock,
     )
     .await?;
@@ -200,7 +201,7 @@ pub(crate) async fn handle_quorum_proposal_recv<
             justify_qc.view_number(),
             event_sender.clone(),
             event_receiver.clone(),
-            Arc::clone(&validation_info.membership),
+            validation_info.membership.coordinator.clone(),
             OuterConsensus::new(Arc::clone(&validation_info.consensus.inner_consensus)),
             // Note that we explicitly use the node key here instead of the provided key in the signature.
             // This is because the key that we receive is for the prior leader, so the payload would be routed
diff --git a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs
index a7ab61cd50..b56130f3b3 100644
--- a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs
+++ b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs
@@ -17,13 +17,17 @@ use hotshot_task::task::{Task, TaskState};
 use hotshot_types::{
     consensus::{Consensus, OuterConsensus},
     data::{EpochNumber, Leaf, ViewChangeEvidence2},
+    epoch_membership::{self, EpochMembership, EpochMembershipCoordinator},
     event::Event,
     message::UpgradeLock,
     simple_certificate::UpgradeCertificate,
+    simple_vote::HasEpoch,
+    traits::block_contents::BlockHeader,
     traits::{
         node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
     },
+    utils::option_epoch_from_block_number,
     vote::{Certificate, HasViewNumber},
 };
 use hotshot_utils::anytrace::{bail, Result};
@@ -58,7 +62,7 @@ pub struct QuorumProposalRecvTaskState<TYPES: NodeType, I: NodeImplementation<T
     pub consensus: OuterConsensus<TYPES>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembershipCoordinator<TYPES>,

     /// View timeout from config.
     pub timeout: u64,
@@ -99,7 +103,7 @@ pub(crate) struct ValidationInfo<TYPES: NodeType, I: NodeImplementation<TYPES>,
     pub(crate) consensus: OuterConsensus<TYPES>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembership<TYPES>,

     /// Output events to application
     pub output_event_stream: async_broadcast::Sender<Event<TYPES>>,
@@ -145,12 +149,23 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                     tracing::error!("Throwing away old proposal");
                     return;
                 }
+                let proposal_epoch = option_epoch_from_block_number::<TYPES>(
+                    proposal.data.proposal.epoch().is_some(),
+                    proposal.data.block_header().block_number(),
+                    self.epoch_height,
+                );
+                let Ok(epoch_membership) =
+                    self.membership.membership_for_epoch(proposal_epoch).await
+                else {
+                    tracing::warn!("No Stake table for epoch = {:?}", proposal_epoch);
+                    return;
+                };
                 let validation_info = ValidationInfo::<TYPES, I, V> {
                     id: self.id,
                     public_key: self.public_key.clone(),
                     private_key: self.private_key.clone(),
                     consensus: self.consensus.clone(),
-                    membership: Arc::clone(&self.membership),
+                    membership: epoch_membership,
                     output_event_stream: self.output_event_stream.clone(),
                     storage: Arc::clone(&self.storage),
                     upgrade_lock: self.upgrade_lock.clone(),
@@ -166,7 +181,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>
                     .await
                 {
                     Ok(()) => {}
-                    Err(e) => debug!(?e, "Failed to validate the proposal"),
+                    Err(e) => error!(?e, "Failed to validate the proposal"),
                 }
             }
             HotShotEvent::ViewChange(view, epoch) => {
diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs
index 768d6a162c..19a926a0fd 100644
--- a/hotshot-task-impls/src/quorum_vote/handlers.rs
+++ b/hotshot-task-impls/src/quorum_vote/handlers.rs
@@ -10,10 +10,12 @@ use async_broadcast::{InactiveReceiver, Sender};
 use async_lock::RwLock;
 use chrono::Utc;
 use committable::Committable;
+use hotshot_types::epoch_membership::EpochMembership;
 use hotshot_types::{
     consensus::OuterConsensus,
     data::{Leaf2, QuorumProposalWrapper, VidDisperseShare},
     drb::{compute_drb_result, DrbResult, INITIAL_DRB_RESULT},
+    epoch_membership::EpochMembershipCoordinator,
     event::{Event, EventType},
     message::{Proposal, UpgradeLock},
     simple_vote::{HasEpoch, QuorumData2, QuorumVote2},
@@ -47,12 +49,11 @@ use crate::{
 };

 async fn notify_membership_of_drb_result<TYPES: NodeType>(
-    membership: &Arc<RwLock<TYPES::Membership>>,
-    epoch: <TYPES as NodeType>::Epoch,
+    membership: &EpochMembership<TYPES>,
     drb_result: DrbResult,
 ) {
-    tracing::debug!("Calling add_drb_result for epoch {:?}", epoch);
-    membership.write().await.add_drb_result(epoch, drb_result);
+    tracing::debug!("Calling add_drb_result for epoch {:?}", membership.epoch());
+    membership.add_drb_result(drb_result).await;
 }

 /// Store the DRB result from the computation task to the shared `results` table.
@@ -100,8 +101,14 @@ async fn store_and_get_computed_drb_result<
         .insert(epoch_number, result);
     drop(consensus_writer);

-    notify_membership_of_drb_result::<TYPES>(&task_state.membership, epoch_number, result)
-        .await;
+    notify_membership_of_drb_result::<TYPES>(
+        &task_state
+            .membership
+            .membership_for_epoch(Some(epoch_number))
+            .await?,
+        result,
+    )
+    .await;
     task_state.drb_computation = None;
     Ok(result)
 }
@@ -146,13 +153,14 @@ async fn verify_drb_result<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Ver
         .next_drb_result()
         .context(info!("Proposal is missing the DRB result."))?;

-    let membership_reader = task_state.membership.read().await;
-
-    if let Some(epoch_val) = epoch {
-        let has_stake_current_epoch =
-            membership_reader.has_stake(&task_state.public_key, Some(epoch_val));
-
-        drop(membership_reader);
+    let has_stake_current_epoch = task_state
+        .membership
+        .membership_for_epoch(epoch)
+        .await
+        .context(warn!("No stake table for epoch"))?
+        .has_stake(&task_state.public_key)
+        .await;

         if has_stake_current_epoch {
             let computed_result =
@@ -184,13 +192,17 @@ async fn start_drb_task<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versio
         task_state.epoch_height,
     ));

-    // Start the new task if we're in the committee for this epoch
-    if task_state
+    let Ok(epoch_membership) = task_state
         .membership
-        .read()
+        .membership_for_epoch(Some(current_epoch_number))
         .await
-        .has_stake(&task_state.public_key, Some(current_epoch_number))
-    {
+    else {
+        tracing::warn!("No Stake Table for Epoch = {:?}", current_epoch_number);
+        return;
+    };
+
+    // Start the new task if we're in the committee for this epoch
+    if epoch_membership.has_stake(&task_state.public_key).await {
         let new_epoch_number = current_epoch_number + 1;

         // If a task is currently live AND has finished, join it and save the result.
@@ -209,12 +221,7 @@ async fn start_drb_task<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versio
                     .drb_seeds_and_results
                     .results
                     .insert(*task_epoch, result);
-                notify_membership_of_drb_result::<TYPES>(
-                    &task_state.membership,
-                    *task_epoch,
-                    result,
-                )
-                .await;
+                notify_membership_of_drb_result::<TYPES>(&epoch_membership, result).await;
                 task_state.drb_computation = None;
             }
             Err(e) => {
@@ -329,8 +336,10 @@ async fn store_drb_seed_and_result<TYPES: NodeType, I: NodeImplementation<TYPES
             .results
             .insert(current_epoch_number + 1, result);
         notify_membership_of_drb_result::<TYPES>(
-            &task_state.membership,
-            current_epoch_number + 1,
+            &task_state
+                .membership
+                .membership_for_epoch(Some(current_epoch_number + 1))
+                .await?,
             result,
         )
         .await;
@@ -376,7 +385,7 @@ pub(crate) async fn handle_quorum_proposal_validated<
             Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate),
             &task_state.public_key,
             version >= V::Epochs::VERSION,
-            &task_state.membership,
+            task_state.membership.membership(),
         )
         .await
     } else {
@@ -386,7 +395,7 @@ pub(crate) async fn handle_quorum_proposal_validated<
             Arc::clone(&task_state.upgrade_lock.decided_upgrade_certificate),
             &task_state.public_key,
             version >= V::Epochs::VERSION,
-            &task_state.membership,
+            task_state.membership.membership(),
         )
         .await
     };
@@ -424,6 +433,7 @@ pub(crate) async fn handle_quorum_proposal_validated<
         tracing::debug!("Calling set_first_epoch for epoch {:?}", first_epoch_number);
         task_state
             .membership
+            .membership()
             .write()
             .await
             .set_first_epoch(first_epoch_number, INITIAL_DRB_RESULT);
@@ -521,7 +531,7 @@ pub(crate) async fn update_shared_state<
     consensus: OuterConsensus<TYPES>,
     sender: Sender<Arc<HotShotEvent<TYPES>>>,
     receiver: InactiveReceiver<Arc<HotShotEvent<TYPES>>>,
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership: EpochMembershipCoordinator<TYPES>,
     public_key: TYPES::SignatureKey,
     private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
     upgrade_lock: UpgradeLock<TYPES, V>,
@@ -560,7 +570,7 @@ pub(crate) async fn update_shared_state<
             justify_qc.view_number(),
             sender.clone(),
             receiver.activate_cloned(),
-            Arc::clone(&membership),
+            membership.clone(),
             OuterConsensus::new(Arc::clone(&consensus.inner_consensus)),
             public_key.clone(),
             private_key.clone(),
@@ -656,7 +666,7 @@ pub(crate) async fn update_shared_state<
 #[allow(clippy::too_many_arguments)]
 pub(crate) async fn submit_vote<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions>(
     sender: Sender<Arc<HotShotEvent<TYPES>>>,
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership: EpochMembership<TYPES>,
     public_key: TYPES::SignatureKey,
     private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
     upgrade_lock: UpgradeLock<TYPES, V>,
@@ -667,20 +677,12 @@ pub(crate) async fn submit_vote<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     extended_vote: bool,
     epoch_height: u64,
 ) -> Result<()> {
-    let epoch_number = option_epoch_from_block_number::<TYPES>(
-        leaf.with_epoch,
-        leaf.block_header().block_number(),
-        epoch_height,
-    );
-
-    let membership_reader = membership.read().await;
-    let committee_member_in_current_epoch = membership_reader.has_stake(&public_key, epoch_number);
+    let committee_member_in_current_epoch = membership.has_stake(&public_key).await;
     // If the proposed leaf is for the last block in the epoch and the node is part of the quorum committee
     // in the next epoch, the node should vote to achieve the double quorum.
     let committee_member_in_next_epoch = leaf.with_epoch
         && is_last_block_in_epoch(leaf.height(), epoch_height)
-        && membership_reader.has_stake(&public_key, epoch_number.map(|x| x + 1));
-    drop(membership_reader);
+        && membership.next_epoch().await?.has_stake(&public_key).await;

     ensure!(
         committee_member_in_current_epoch || committee_member_in_next_epoch,
@@ -694,7 +696,7 @@ pub(crate) async fn submit_vote<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     let vote = QuorumVote2::<TYPES>::create_signed_vote(
         QuorumData2 {
             leaf_commit: leaf.commit(),
-            epoch: epoch_number,
+            epoch: membership.epoch(),
         },
         view_number,
         &public_key,
diff --git a/hotshot-task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs
index 8b26cbb39a..e3d7af2771 100644
--- a/hotshot-task-impls/src/quorum_vote/mod.rs
+++ b/hotshot-task-impls/src/quorum_vote/mod.rs
@@ -20,13 +20,13 @@ use hotshot_types::{
     consensus::{ConsensusMetricsValue, OuterConsensus},
     data::{Leaf2, QuorumProposalWrapper},
     drb::DrbComputation,
+    epoch_membership::EpochMembershipCoordinator,
     event::Event,
     message::{Proposal, UpgradeLock},
     simple_certificate::UpgradeCertificate,
     simple_vote::HasEpoch,
     traits::{
         block_contents::BlockHeader,
-        election::Membership,
         node_implementation::{ConsensusTime, NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
         storage::Storage,
@@ -74,7 +74,7 @@ pub struct VoteDependencyHandle<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     pub instance_state: Arc<TYPES::InstanceState>,

     /// Membership for Quorum certs/votes.
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// Reference to the storage.
     pub storage: Arc<RwLock<I::Storage>>,
@@ -237,7 +237,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
             OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)),
             self.sender.clone(),
             self.receiver.clone(),
-            Arc::clone(&self.membership),
+            self.membership_coordinator.clone(),
             self.public_key.clone(),
             self.private_key.clone(),
             self.upgrade_lock.clone(),
@@ -254,12 +254,24 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl
             tracing::error!("Failed to update shared consensus state; error = {e:#}");
             return;
         }
-
         let cur_epoch = option_epoch_from_block_number::<TYPES>(
             leaf.with_epoch,
             leaf.height(),
             self.epoch_height,
         );
+
+        let epoch_membership = match self
+            .membership_coordinator
+            .membership_for_epoch(cur_epoch)
+            .await
+        {
+            Ok(epoch_membership) => epoch_membership,
+            Err(e) => {
+                tracing::warn!("{:?}", e);
+                return;
+            }
+        };
+
         tracing::trace!(
             "Sending ViewChange for view {} and epoch {:?}",
             self.view_number + 1,
@@ -273,7 +285,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES> + 'static, V: Versions> Handl

         if let Err(e) = submit_vote::<TYPES, I, V>(
             self.sender.clone(),
-            Arc::clone(&self.membership),
+            epoch_membership,
             self.public_key.clone(),
             self.private_key.clone(),
             self.upgrade_lock.clone(),
@@ -317,7 +329,7 @@ pub struct QuorumVoteTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V:
     pub network: Arc<I::Network>,

     /// Membership for Quorum certs/votes and DA committee certs/votes.
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembershipCoordinator<TYPES>,

     /// In-progress DRB computation task.
     pub drb_computation: DrbComputation<TYPES>,
@@ -441,7 +453,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS
             private_key: self.private_key.clone(),
             consensus: OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)),
             instance_state: Arc::clone(&self.instance_state),
-            membership: Arc::clone(&self.membership),
+            membership_coordinator: self.membership.clone(),
             storage: Arc::clone(&self.storage),
             view_number,
             sender: event_sender.clone(),
@@ -553,11 +565,9 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS

         let cert_epoch = cert.data.epoch;

-        let membership_reader = self.membership.read().await;
-        let membership_da_stake_table = membership_reader.da_stake_table(cert_epoch);
-        let membership_da_success_threshold =
-            membership_reader.da_success_threshold(cert_epoch);
-        drop(membership_reader);
+        let epoch_membership = self.membership.membership_for_epoch(cert_epoch).await?;
+        let membership_da_stake_table = epoch_membership.da_stake_table().await;
+        let membership_da_success_threshold = epoch_membership.da_success_threshold().await;

         // Validate the DAC.
         cert.is_valid_cert(
@@ -606,18 +616,23 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS
         let vid_epoch = share.data.epoch();
         let target_epoch = share.data.target_epoch();
-        let membership_reader = self.membership.read().await;
+        let membership_reader = self.membership.membership_for_epoch(vid_epoch).await?;
         // ensure that the VID share was sent by a DA member OR the view leader
         ensure!(
             membership_reader
-                .da_committee_members(view, vid_epoch)
+                .da_committee_members(view)
+                .await
                 .contains(sender)
-                || *sender == membership_reader.leader(view, vid_epoch)?,
+                || *sender == membership_reader.leader(view).await?,
             "VID share was not sent by a DA member or the view leader."
         );

-        let membership_total_nodes = membership_reader.total_nodes(target_epoch);
-        drop(membership_reader);
+        let membership_total_nodes = self
+            .membership
+            .membership_for_epoch(target_epoch)
+            .await?
+            .total_nodes()
+            .await;

         if let Err(()) = share.data.verify_share(membership_total_nodes) {
             bail!("Failed to verify VID share");
@@ -734,7 +749,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS
                     OuterConsensus::new(Arc::clone(&self.consensus.inner_consensus)),
                     event_sender.clone(),
                     event_receiver.clone().deactivate(),
-                    Arc::clone(&self.membership),
+                    self.membership.clone(),
                     self.public_key.clone(),
                     self.private_key.clone(),
                     self.upgrade_lock.clone(),
@@ -781,7 +796,9 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> QuorumVoteTaskS

                 submit_vote::<TYPES, I, V>(
                     event_sender.clone(),
-                    Arc::clone(&self.membership),
+                    self.membership
+                        .membership_for_epoch(Some(current_epoch))
+                        .await?,
                     self.public_key.clone(),
                     self.private_key.clone(),
                     self.upgrade_lock.clone(),
diff --git a/hotshot-task-impls/src/request.rs b/hotshot-task-impls/src/request.rs
index 02d10a284d..1bf327a98f 100644
--- a/hotshot-task-impls/src/request.rs
+++ b/hotshot-task-impls/src/request.rs
@@ -14,7 +14,6 @@ use std::{
 };

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use async_trait::async_trait;
 use hotshot_task::{
     dependency::{Dependency, EventDependency},
@@ -22,10 +21,10 @@ use hotshot_task::{
 };
 use hotshot_types::{
     consensus::OuterConsensus,
+    epoch_membership::EpochMembershipCoordinator,
     simple_vote::HasEpoch,
     traits::{
         block_contents::BlockHeader,
-        election::Membership,
         network::{ConnectedNetwork, DataRequest, RequestKind},
         node_implementation::{NodeImplementation, NodeType},
         signature_key::SignatureKey,
@@ -68,7 +67,7 @@ pub struct NetworkRequestState<TYPES: NodeType, I: NodeImplementation<TYPES>> {
     pub delay: Duration,

     /// Membership (Used here only for DA)
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// This nodes public key
     pub public_key: TYPES::SignatureKey,
@@ -114,14 +113,20 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>> TaskState for NetworkRequest
             HotShotEvent::QuorumProposalValidated(proposal, _) => {
                 let prop_view = proposal.data.view_number();
                 let prop_epoch = proposal.data.epoch();
-                let next_epoch = prop_epoch.map(|epoch| epoch + 1);

                 // Request VID share only if:
                 // 1. we are part of the current epoch or
                 // 2. we are part of the next epoch and this is a proposal for the last block.
-                let membership_reader = self.membership.read().await;
-                if !membership_reader.has_stake(&self.public_key, prop_epoch)
-                    && (!membership_reader.has_stake(&self.public_key, next_epoch)
+                let membership_reader = self
+                    .membership_coordinator
+                    .membership_for_epoch(prop_epoch)
+                    .await?;
+                if !membership_reader.has_stake(&self.public_key).await
+                    && (!membership_reader
+                        .next_epoch()
+                        .await?
+                        .has_stake(&self.public_key)
+                        .await
                         || !is_last_block_in_epoch(
                             proposal.data.block_header().block_number(),
                             self.epoch_height,
@@ -129,7 +134,6 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>> TaskState for NetworkRequest
                 {
                     return Ok(());
                 }
-                drop(membership_reader);

                 let consensus_reader = self.consensus.read().await;
                 let maybe_vid_share = consensus_reader
@@ -213,15 +217,26 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>> NetworkRequestState
+            Ok(m) => m,
+            Err(e) => {
+                tracing::warn!(e.message);
+                return;
+            }
+        };
+        let mut da_committee_for_view = membership_reader.da_committee_members(view).await;
+        if let Ok(leader) = membership_reader.leader(view).await {
             da_committee_for_view.insert(leader);
         }

         // Get committee members for view
         let mut recipients: Vec<TYPES::SignatureKey> = membership_reader
-            .da_committee_members(view, epoch)
+            .da_committee_members(view)
+            .await
             .into_iter()
             .collect();
         drop(membership_reader);
diff --git a/hotshot-task-impls/src/response.rs b/hotshot-task-impls/src/response.rs
index f0000a7c14..1ea66cc667 100644
--- a/hotshot-task-impls/src/response.rs
+++ b/hotshot-task-impls/src/response.rs
@@ -7,14 +7,13 @@
 use std::{sync::Arc, time::Duration};

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use committable::Committable;
 use hotshot_types::{
     consensus::{Consensus, LockedConsensusState, OuterConsensus},
     data::VidDisperseShare,
+    epoch_membership::EpochMembershipCoordinator,
     message::{Proposal, UpgradeLock},
     traits::{
-        election::Membership,
         network::DataRequest,
         node_implementation::{NodeType, Versions},
         signature_key::SignatureKey,
@@ -36,7 +35,7 @@ pub struct NetworkResponseState<TYPES: NodeType, V: Versions> {
     consensus: LockedConsensusState<TYPES>,

     /// Quorum membership for checking if requesters have state
-    membership: Arc<RwLock<TYPES::Membership>>,
+    membership: EpochMembershipCoordinator<TYPES>,

     /// This replicas public key
     pub_key: TYPES::SignatureKey,
@@ -55,7 +54,7 @@ impl<TYPES: NodeType, V: Versions> NetworkResponseState<TYPES, V> {
     /// Create the network request state with the info it needs
     pub fn new(
         consensus: LockedConsensusState<TYPES>,
-        membership: Arc<RwLock<TYPES::Membership>>,
+        membership: EpochMembershipCoordinator<TYPES>,
         pub_key: TYPES::SignatureKey,
         private_key: <TYPES::SignatureKey as SignatureKey>::PrivateKey,
         id: u64,
@@ -175,7 +174,7 @@ impl<TYPES: NodeType, V: Versions> NetworkResponseState<TYPES, V> {
                     OuterConsensus::new(Arc::clone(&self.consensus)),
                     view,
                     target_epoch,
-                    Arc::clone(&self.membership),
+                    self.membership.clone(),
                     &self.private_key,
                     &self.upgrade_lock,
                 )
@@ -188,7 +187,7 @@ impl<TYPES: NodeType, V: Versions> NetworkResponseState<TYPES, V> {
                 OuterConsensus::new(Arc::clone(&self.consensus)),
                 view,
                 target_epoch,
-                Arc::clone(&self.membership),
+                self.membership.clone(),
                 &self.private_key,
                 &self.upgrade_lock,
             )
@@ -210,7 +209,10 @@ impl<TYPES: NodeType, V: Versions> NetworkResponseState<TYPES, V> {
         sender: &TYPES::SignatureKey,
         epoch: Option<TYPES::Epoch>,
     ) -> bool {
-        self.membership.read().await.has_stake(sender, epoch)
+        let Ok(memb) = self.membership.membership_for_epoch(epoch).await else {
+            return false;
+        };
+        memb.has_stake(sender).await
     }
 }
diff --git a/hotshot-task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs
index 4437597c56..06a65f8b63 100644
--- a/hotshot-task-impls/src/transactions.rs
+++ b/hotshot-task-impls/src/transactions.rs
@@ -10,7 +10,6 @@ use std::{
 };

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use async_trait::async_trait;
 use futures::{future::join_all, stream::FuturesUnordered, StreamExt};
 use hotshot_builder_api::v0_1::block_info::AvailableBlockInfo;
@@ -19,12 +18,12 @@ use hotshot_types::{
     consensus::OuterConsensus,
     data::VidCommitment,
     data::{null_block, PackedBundle},
+    epoch_membership::EpochMembershipCoordinator,
     event::{Event, EventType},
     message::UpgradeLock,
     traits::{
         auction_results_provider::AuctionResultsProvider,
         block_contents::{BuilderFee, EncodeBytes},
-        election::Membership,
         node_implementation::{ConsensusTime, HasUrls, NodeImplementation, NodeType, Versions},
         signature_key::{BuilderSignatureKey, SignatureKey},
         BlockPayload,
@@ -92,7 +91,7 @@ pub struct TransactionTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V
     pub consensus: OuterConsensus<TYPES>,

     /// Membership for the quorum
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// Builder 0.1 API clients
     pub builder_clients: Vec<BuilderClient<V::Base>>,
@@ -482,7 +481,12 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> TransactionTask
         self.cur_view = view;
         self.cur_epoch = epoch;

-        let leader = self.membership.read().await.leader(view, epoch)?;
+        let leader = self
+            .membership_coordinator
+            .membership_for_epoch(epoch)
+            .await?
+            .leader(view)
+            .await?;
         if leader == self.public_key {
             self.handle_view_change(&event_stream, view, epoch).await;
             return Ok(());
diff --git a/hotshot-task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs
index c98cd611e9..4eaffb5dd8 100644
--- a/hotshot-task-impls/src/upgrade.rs
+++ b/hotshot-task-impls/src/upgrade.rs
@@ -7,18 +7,17 @@
 use std::{marker::PhantomData, sync::Arc, time::SystemTime};

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use async_trait::async_trait;
 use committable::Committable;
 use hotshot_task::task::TaskState;
 use hotshot_types::{
     data::UpgradeProposal,
+    epoch_membership::EpochMembershipCoordinator,
     event::{Event, EventType},
     message::{Proposal, UpgradeLock},
     simple_certificate::UpgradeCertificate,
     simple_vote::{UpgradeProposalData, UpgradeVote},
     traits::{
-        election::Membership,
         node_implementation::{ConsensusTime, NodeType, Versions},
         signature_key::SignatureKey,
     },
@@ -47,7 +46,7 @@ pub struct UpgradeTaskState<TYPES: NodeType, V: Versions> {
     pub cur_epoch: Option<TYPES::Epoch>,

     /// Membership for Quorum Certs/votes
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// A map of `UpgradeVote` collector tasks
     pub vote_collectors: VoteCollectorsMap<TYPES, UpgradeVote<TYPES>, UpgradeCertificate<TYPES>, V>,
@@ -177,7 +176,12 @@ impl<TYPES: NodeType, V: Versions> UpgradeTaskState<TYPES, V> {
         );

         // We then validate that the proposal was issued by the leader for the view.
-        let view_leader_key = self.membership.read().await.leader(view, self.cur_epoch)?;
+        let view_leader_key = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?
+            .leader(view)
+            .await?;
         ensure!(
             view_leader_key == *sender,
             info!(
@@ -218,25 +222,25 @@ impl<TYPES: NodeType, V: Versions> UpgradeTaskState<TYPES, V> {
         tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number());

         // Check if we are the leader.
-        {
-            let view = vote.view_number();
-            let membership_reader = self.membership.read().await;
-            ensure!(
-                membership_reader.leader(view, self.cur_epoch)? == self.public_key,
-                debug!(
-                    "We are not the leader for view {} are we leader for next view? {}",
-                    *view,
-                    membership_reader.leader(view + 1, self.cur_epoch)? == self.public_key
-                )
-            );
-        }
+        let view = vote.view_number();
+        let epoch_membership = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?;
+        ensure!(
+            epoch_membership.leader(view).await? == self.public_key,
+            debug!(
+                "We are not the leader for view {} are we leader for next view? {}",
+                *view,
+                epoch_membership.leader(view + 1).await? == self.public_key
+            )
+        );

         handle_vote(
             &mut self.vote_collectors,
             vote,
             self.public_key.clone(),
-            &self.membership,
-            self.cur_epoch,
+            &epoch_membership,
             self.id,
             &event,
             &tx,
@@ -262,10 +266,14 @@ impl<TYPES: NodeType, V: Versions> UpgradeTaskState<TYPES, V> {
             ))?
             .as_secs();

-        let leader = self.membership.read().await.leader(
-            TYPES::View::new(view + TYPES::UPGRADE_CONSTANTS.propose_offset),
-            self.cur_epoch,
-        )?;
+        let leader = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?
+            .leader(TYPES::View::new(
+                view + TYPES::UPGRADE_CONSTANTS.propose_offset,
+            ))
+            .await?;

         // We try to form a certificate 5 views before we're leader.
         if view >= self.start_proposing_view
diff --git a/hotshot-task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs
index 21ffe46eed..8f98e018a7 100644
--- a/hotshot-task-impls/src/vid.rs
+++ b/hotshot-task-impls/src/vid.rs
@@ -7,17 +7,16 @@
 use std::{marker::PhantomData, sync::Arc};

 use async_broadcast::{Receiver, Sender};
-use async_lock::RwLock;
 use async_trait::async_trait;
 use hotshot_task::task::TaskState;
 use hotshot_types::{
     consensus::OuterConsensus,
     data::{PackedBundle, VidDisperse, VidDisperseShare},
+    epoch_membership::EpochMembershipCoordinator,
     message::{Proposal, UpgradeLock},
     simple_vote::HasEpoch,
     traits::{
         block_contents::BlockHeader,
-        election::Membership,
         node_implementation::{NodeImplementation, NodeType, Versions},
         signature_key::SignatureKey,
         BlockPayload,
@@ -47,7 +46,7 @@ pub struct VidTaskState<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versio
     pub network: Arc<I::Network>,

     /// Membership for the quorum
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// This Nodes Public Key
     pub public_key: TYPES::SignatureKey,
@@ -88,10 +87,12 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> VidTaskState
                 (
                     &payload,
-                    &Arc::clone(&self.membership),
+                    &self.membership_coordinator,
                     *view_number,
                     epoch,
                     epoch,
                 )
@@ -210,7 +211,7 @@ impl<TYPES: NodeType, I: NodeImplementation<TYPES>, V: Versions> VidTaskState
                 (
                     &payload.payload,
-                    &Arc::clone(&self.membership),
+                    &self.membership_coordinator,
                     proposal_view_number,
                     target_epoch,
                     sender_epoch,
diff --git a/hotshot-task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs
index f984bc4dff..8dcc296cfe 100644
--- a/hotshot-task-impls/src/view_sync.rs
+++ b/hotshot-task-impls/src/view_sync.rs
@@ -16,6 +16,7 @@ use async_lock::RwLock;
 use async_trait::async_trait;
 use hotshot_task::task::TaskState;
 use hotshot_types::{
+    epoch_membership::{EpochMembership, EpochMembershipCoordinator},
     message::UpgradeLock,
     simple_certificate::{
         ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2,
@@ -25,7 +26,6 @@ use hotshot_types::{
         ViewSyncPreCommitData2, ViewSyncPreCommitVote2,
     },
     traits::{
-        election::Membership,
         node_implementation::{ConsensusTime, NodeType, Versions},
         signature_key::SignatureKey,
     },
@@ -75,7 +75,7 @@ pub struct ViewSyncTaskState<TYPES: NodeType, V: Versions> {
     pub cur_epoch: Option<TYPES::Epoch>,

     /// Membership for the quorum
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership_coordinator: EpochMembershipCoordinator<TYPES>,

     /// This Nodes Public Key
     pub public_key: TYPES::SignatureKey,
@@ -143,9 +143,6 @@ pub struct ViewSyncReplicaTaskState<TYPES: NodeType, V: Versions> {
     /// Round HotShot wishes to be in
     pub next_view: TYPES::View,

-    /// Current epoch HotShot is in
-    pub cur_epoch: Option<TYPES::Epoch>,
-
     /// The relay index we are currently on
     pub relay: u64,
@@ -162,7 +159,7 @@ pub struct ViewSyncReplicaTaskState<TYPES: NodeType, V: Versions> {
     pub id: u64,

     /// Membership for the quorum
-    pub membership: Arc<RwLock<TYPES::Membership>>,
+    pub membership: EpochMembership<TYPES>,

     /// This Nodes Public Key
     pub public_key: TYPES::SignatureKey,
@@ -227,16 +224,27 @@ impl<TYPES: NodeType, V: Versions> ViewSyncTaskState<TYPES, V> {
             return;
         }

+        let membership = match self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await
+        {
+            Ok(m) => m,
+            Err(e) => {
+                tracing::warn!(e.message);
+                return;
+            }
+        };
+
         // We do not have a replica task already running, so start one
         let mut replica_state: ViewSyncReplicaTaskState<TYPES, V> = ViewSyncReplicaTaskState {
             cur_view: view,
             next_view: view,
-            cur_epoch: self.cur_epoch,
             relay: 0,
             finalized: false,
             sent_view_change_event: false,
             timeout_task: None,
-            membership: Arc::clone(&self.membership),
+            membership,
             public_key: self.public_key.clone(),
             private_key: self.private_key.clone(),
             view_sync_timeout: self.view_sync_timeout,
@@ -310,22 +318,21 @@ impl<TYPES: NodeType, V: Versions> ViewSyncTaskState<TYPES, V> {
             return Ok(());
         }

+        let epoch_mem = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?;
         // We do not have a relay task already running, so start one
         ensure!(
-            self.membership
-                .read()
-                .await
-                .leader(vote_view + relay, self.cur_epoch)?
-                == self.public_key,
+            epoch_mem.leader(vote_view + relay).await? == self.public_key,
             "View sync vote sent to wrong leader"
         );

         let info = AccumulatorInfo {
             public_key: self.public_key.clone(),
-            membership: Arc::clone(&self.membership),
+            membership: epoch_mem,
             view: vote_view,
             id: self.id,
-            epoch: vote.data.epoch,
         };
         let vote_collector = create_vote_accumulator(
             &info,
@@ -360,21 +367,20 @@ impl<TYPES: NodeType, V: Versions> ViewSyncTaskState<TYPES, V> {
         }

         // We do not have a relay task already running, so start one
+        let epoch_mem = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?;
         ensure!(
-            self.membership
-                .read()
-                .await
-                .leader(vote_view + relay, self.cur_epoch)?
-                == self.public_key,
+            epoch_mem.leader(vote_view + relay).await? == self.public_key,
             debug!("View sync vote sent to wrong leader")
         );

         let info = AccumulatorInfo {
             public_key: self.public_key.clone(),
-            membership: Arc::clone(&self.membership),
+            membership: epoch_mem,
             view: vote_view,
             id: self.id,
-            epoch: vote.data.epoch,
         };

         let vote_collector = create_vote_accumulator(
@@ -408,22 +414,21 @@ impl<TYPES: NodeType, V: Versions> ViewSyncTaskState<TYPES, V> {
             return Ok(());
         }

+        let epoch_mem = self
+            .membership_coordinator
+            .membership_for_epoch(self.cur_epoch)
+            .await?;
         // We do not have a relay task already running, so start one
         ensure!(
-            self.membership
-                .read()
-                .await
-                .leader(vote_view + relay, self.cur_epoch)?
-                == self.public_key,
+            epoch_mem.leader(vote_view + relay).await? == self.public_key,
             debug!("View sync vote sent to wrong leader")
         );

         let info = AccumulatorInfo {
             public_key: self.public_key.clone(),
-            membership: Arc::clone(&self.membership),
+            membership: epoch_mem,
             view: vote_view,
             id: self.id,
-            epoch: vote.data.epoch,
         };
         let vote_collector = create_vote_accumulator(
             &info,
@@ -488,10 +493,11 @@ impl<TYPES: NodeType, V: Versions> ViewSyncTaskState<TYPES, V> {
             self.num_timeouts_tracked += 1;
             let leader = self
-                .membership
-                .read()
-                .await
-                .leader(view_number, self.cur_epoch)?;
+                .membership_coordinator
+                .membership_for_epoch(self.cur_epoch)
+                .await?
+ .leader(view_number) + .await?; tracing::warn!( %leader, leader_mnemonic = hotshot_types::utils::mnemonic(&leader), @@ -531,7 +537,7 @@ impl ViewSyncTaskState { } impl ViewSyncReplicaTaskState { - #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.cur_epoch.map(|x| *x)), name = "View Sync Replica Task", level = "error")] + #[instrument(skip_all, fields(id = self.id, view = *self.cur_view, epoch = self.membership.epoch().map(|x| *x)), name = "View Sync Replica Task", level = "error")] /// Handle incoming events for the view sync replica task pub async fn handle( &mut self, @@ -549,11 +555,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_failure_threshold = - membership_reader.failure_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_failure_threshold = self.membership.failure_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -643,11 +646,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_success_threshold = - membership_reader.success_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_success_threshold = self.membership.success_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -707,7 +707,10 @@ impl ViewSyncReplicaTaskState { // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), + Arc::new(HotShotEvent::ViewChange( + self.next_view, + self.membership.epoch(), + )), &event_stream, ) .await; @@ -748,11 +751,8 @@ impl ViewSyncReplicaTaskState { return None; } - let membership_reader = self.membership.read().await; - let membership_stake_table = membership_reader.stake_table(self.cur_epoch); - let membership_success_threshold = - membership_reader.success_threshold(self.cur_epoch); - drop(membership_reader); + let membership_stake_table = self.membership.stake_table().await; + let membership_success_threshold = self.membership.success_threshold().await; // If certificate is not valid, return current state if let Err(e) = certificate @@ -788,7 +788,10 @@ impl ViewSyncReplicaTaskState { // TODO: Figure out the correct way to view sync across epochs if needed broadcast_event( - Arc::new(HotShotEvent::ViewChange(self.next_view, self.cur_epoch)), + Arc::new(HotShotEvent::ViewChange( + self.next_view, + self.membership.epoch(), + )), &event_stream, ) .await; @@ -802,7 +805,7 @@ impl ViewSyncReplicaTaskState { return None; } - let epoch = self.cur_epoch; + let epoch = self.membership.epoch(); let Ok(vote) = ViewSyncPreCommitVote2::::create_signed_vote( ViewSyncPreCommitData2 { relay: 0, @@ -863,7 +866,7 @@ impl ViewSyncReplicaTaskState { ViewSyncPreCommitData2 { relay: self.relay, round: self.next_view, - epoch: self.cur_epoch, + epoch: self.membership.epoch(), }, self.next_view, &self.public_key, diff --git a/hotshot-task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs index ede632a973..eb8a4cd0aa 100644 --- a/hotshot-task-impls/src/vote_collection.rs +++ 
b/hotshot-task-impls/src/vote_collection.rs @@ -7,15 +7,16 @@ use std::{ collections::{btree_map::Entry, BTreeMap, HashMap}, fmt::Debug, + future::Future, marker::PhantomData, sync::Arc, }; use async_broadcast::Sender; -use async_lock::RwLock; use async_trait::async_trait; use either::Either::{Left, Right}; use hotshot_types::{ + epoch_membership::EpochMembership, message::UpgradeLock, simple_certificate::{ DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, @@ -26,10 +27,7 @@ use hotshot_types::{ DaVote2, NextEpochQuorumVote2, QuorumVote, QuorumVote2, TimeoutVote2, UpgradeVote, ViewSyncCommitVote2, ViewSyncFinalizeVote2, ViewSyncPreCommitVote2, }, - traits::{ - election::Membership, - node_implementation::{NodeType, Versions}, - }, + traits::node_implementation::{ConsensusTime, NodeType, Versions}, utils::EpochTransitionIndicator, vote::{Certificate, HasViewNumber, Vote, VoteAccumulator}, }; @@ -52,7 +50,7 @@ pub struct VoteCollectionTaskState< pub public_key: TYPES::SignatureKey, /// Membership for voting - pub membership: Arc>, + pub membership: EpochMembership, /// accumulator handles aggregating the votes pub accumulator: Option>, @@ -60,9 +58,6 @@ pub struct VoteCollectionTaskState< /// The view which we are collecting votes for pub view: TYPES::View, - /// The epoch which we are collecting votes for - pub epoch: Option, - /// Node id pub id: u64, @@ -83,9 +78,8 @@ pub trait AggregatableVote< /// if the leader cannot be calculated fn leader( &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result; + membership: &EpochMembership, + ) -> impl Future>; /// return the Hotshot event for the completion of this CERT fn make_cert_event(certificate: CERT, key: &TYPES::SignatureKey) -> HotShotEvent; @@ -107,14 +101,14 @@ impl< pub async fn accumulate_vote( &mut self, vote: &VOTE, - sender_epoch: Option, event_stream: &Sender>>, ) -> Result> { + // TODO create this only once ensure!( matches!( self.transition_indicator, EpochTransitionIndicator::InTransition - ) || vote.leader(&*self.membership.read().await, self.epoch)? == self.public_key, + ) || vote.leader(&self.membership).await? == self.public_key, info!("Received vote for a view in which we were not the leader.") ); @@ -131,10 +125,7 @@ impl< "No accumulator to handle vote with. This shouldn't happen." ))?; - match accumulator - .accumulate(vote, &self.membership, sender_epoch) - .await - { + match accumulator.accumulate(vote, self.membership.clone()).await { None => Ok(None), Some(cert) => { tracing::debug!("Certificate Formed! 
{:?}", cert); @@ -180,14 +171,11 @@ pub struct AccumulatorInfo { pub public_key: TYPES::SignatureKey, /// Membership we are accumulation votes for - pub membership: Arc>, + pub membership: EpochMembership, /// View of the votes we are collecting pub view: TYPES::View, - /// Epoch of the votes we are collecting - pub epoch: Option, - /// This nodes id pub id: u64, } @@ -229,11 +217,10 @@ where }; let mut state = VoteCollectionTaskState:: { - membership: Arc::clone(&info.membership), + membership: info.membership.clone(), public_key: info.public_key.clone(), accumulator: Some(new_accumulator), view: info.view, - epoch: info.epoch, id: info.id, transition_indicator, }; @@ -261,8 +248,7 @@ pub async fn handle_vote< collectors: &mut VoteCollectorsMap, vote: &VOTE, public_key: TYPES::SignatureKey, - membership: &Arc>, - epoch: Option, + membership: &EpochMembership, id: u64, event: &Arc>, event_stream: &Sender>>, @@ -277,9 +263,8 @@ where tracing::debug!("Starting vote handle for view {:?}", vote.view_number()); let info = AccumulatorInfo { public_key, - membership: Arc::clone(membership), + membership: membership.clone(), view: vote.view_number(), - epoch, id, }; let collector = create_vote_accumulator( @@ -356,12 +341,8 @@ type ViewSyncFinalizeVoteState = VoteCollectionTaskState< impl AggregatableVote, QuorumCertificate> for QuorumVote { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: QuorumCertificate, @@ -374,12 +355,8 @@ impl AggregatableVote, QuorumCertifica impl AggregatableVote, QuorumCertificate2> for QuorumVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: QuorumCertificate2, @@ -393,12 +370,15 @@ impl AggregatableVote, NextEpochQuorumCertificate2> for NextEpochQuorumVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + let epoch = membership + .epoch + .map(|e| TYPES::Epoch::new(e.saturating_sub(1))); + membership + .get_new_epoch(epoch) + .await? 
+ .leader(self.view_number() + 1) + .await } fn make_cert_event( certificate: NextEpochQuorumCertificate2, @@ -411,12 +391,8 @@ impl impl AggregatableVote, UpgradeCertificate> for UpgradeVote { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number(), epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number()).await } fn make_cert_event( certificate: UpgradeCertificate, @@ -429,12 +405,8 @@ impl AggregatableVote, UpgradeCertifi impl AggregatableVote, DaCertificate2> for DaVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number(), epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number()).await } fn make_cert_event( certificate: DaCertificate2, @@ -447,12 +419,8 @@ impl AggregatableVote, DaCertificate2 AggregatableVote, TimeoutCertificate2> for TimeoutVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.view_number() + 1, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership.leader(self.view_number() + 1).await } fn make_cert_event( certificate: TimeoutCertificate2, @@ -466,12 +434,10 @@ impl AggregatableVote, ViewSyncCommitCertificate2> for ViewSyncCommitVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncCommitCertificate2, @@ -485,12 +451,10 @@ impl AggregatableVote, ViewSyncPreCommitCertificate2> for ViewSyncPreCommitVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncPreCommitCertificate2, @@ -504,12 +468,10 @@ impl AggregatableVote, ViewSyncFinalizeCertificate2> for ViewSyncFinalizeVote2 { - fn leader( - &self, - membership: &TYPES::Membership, - epoch: Option, - ) -> Result { - membership.leader(self.date().round + self.date().relay, epoch) + async fn leader(&self, membership: &EpochMembership) -> Result { + membership + .leader(self.date().round + self.date().relay) + .await } fn make_cert_event( certificate: ViewSyncFinalizeCertificate2, @@ -531,9 +493,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::QuorumVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::QuorumVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -556,12 +516,7 @@ impl match event.as_ref() { HotShotEvent::QuorumVoteRecv(vote) => { // #3967 REVIEW NOTE: Should we error if self.epoch is None? 
- let next_epoch = self - .epoch - .map(|x| x + 1) - .ok_or_else(|| error!("epoch should not be none in handle_vote_event"))?; - self.accumulate_vote(&vote.clone().into(), Some(next_epoch), sender) - .await + self.accumulate_vote(&vote.clone().into(), sender).await } _ => Ok(None), } @@ -583,9 +538,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::UpgradeVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::UpgradeVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -604,7 +557,7 @@ impl HandleVoteEvent, DaCert sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, self.epoch, sender).await, + HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -624,9 +577,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::TimeoutVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::TimeoutVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -647,7 +598,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await + self.accumulate_vote(vote, sender).await } _ => Ok(None), } @@ -668,9 +619,7 @@ impl sender: &Sender>>, ) -> Result>> { match event.as_ref() { - HotShotEvent::ViewSyncCommitVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await - } + HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), } } @@ -691,7 +640,7 @@ impl ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { - self.accumulate_vote(vote, self.epoch, sender).await + self.accumulate_vote(vote, sender).await } _ => Ok(None), } diff --git a/hotshot-testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs index 8fab295bd4..7816b3e354 100644 --- a/hotshot-testing/src/byzantine/byzantine_behaviour.rs +++ b/hotshot-testing/src/byzantine/byzantine_behaviour.rs @@ -339,13 +339,12 @@ impl + std::fmt::Debug, V: Version &self, handle: &mut SystemContextHandle, network: Arc<>::Network>, - membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, _> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: None, - membership, + membership_coordinator: handle.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), diff --git a/hotshot-testing/src/helpers.rs b/hotshot-testing/src/helpers.rs index 082e965b83..7e331a9d65 100644 --- a/hotshot-testing/src/helpers.rs +++ b/hotshot-testing/src/helpers.rs @@ -27,6 +27,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{vid_commitment, Leaf2, VidCommitment, VidDisperse, VidDisperseShare}, drb::INITIAL_DRB_RESULT, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, message::{Proposal, UpgradeLock}, simple_certificate::DaCertificate2, simple_vote::{DaData2, DaVote2, SimpleVote, VersionedVoteData}, @@ -137,6 +138,7 @@ pub async fn build_system_handle_from_launcher< hotshot_config.known_da_nodes.clone(), ))); + let coordinator = EpochMembershipCoordinator::new(memberships, hotshot_config.epoch_height); let node_key_map = launcher.metadata.build_node_key_map(); let (c, s, r) = 
SystemContext::init( @@ -144,7 +146,7 @@ pub async fn build_system_handle_from_launcher< private_key, node_id, hotshot_config, - memberships, + coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -168,18 +170,16 @@ pub async fn build_cert< CERT: Certificate, >( data: DATAType, - membership: &Arc>, + epoch_membership: &EpochMembership, view: TYPES::View, - epoch: Option, public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, ) -> CERT { let real_qc_sig = build_assembled_sig::( &data, - membership, + epoch_membership, view, - epoch, upgrade_lock, ) .await; @@ -234,19 +234,16 @@ pub async fn build_assembled_sig< DATAType: Committable + Clone + Eq + Hash + Serialize + Debug + 'static, >( data: &DATAType, - membership: &Arc>, + epoch_membership: &EpochMembership, view: TYPES::View, - epoch: Option, upgrade_lock: &UpgradeLock, ) -> ::QcType { - let membership_reader = membership.read().await; - let stake_table = CERT::stake_table(&*membership_reader, epoch); + let stake_table = CERT::stake_table(epoch_membership).await; let real_qc_pp: ::QcParams = ::public_parameter( StakeTableEntries::::from(stake_table.clone()).0, - U256::from(CERT::threshold(&*membership_reader, epoch)), + U256::from(CERT::threshold(epoch_membership).await), ); - drop(membership_reader); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; @@ -292,10 +289,9 @@ pub fn key_pair_for_id( } pub async fn da_payload_commitment( - membership: &Arc::Membership>>, + membership: &EpochMembership, transactions: Vec, metadata: &>::Metadata, - epoch_number: Option, version: Version, ) -> VidCommitment { let encoded_transactions = TestTransaction::encode(&transactions); @@ -303,26 +299,25 @@ pub async fn da_payload_commitment( vid_commitment::( &encoded_transactions, &metadata.encode(), - membership.read().await.total_nodes(epoch_number), + membership.total_nodes().await, version, ) } pub async fn build_payload_commitment( - membership: &Arc::Membership>>, + membership: &EpochMembership, view: TYPES::View, - epoch: Option, version: Version, ) -> VidCommitment { // Make some empty encoded transactions, we just care about having a commitment handy for the // later calls. We need the VID commitment to be able to propose later. 
let encoded_transactions = Vec::new(); - let num_storage_nodes = membership.read().await.committee_members(view, epoch).len(); + let num_storage_nodes = membership.committee_members(view).await.len(); vid_commitment::(&encoded_transactions, &[], num_storage_nodes, version) } pub async fn build_vid_proposal( - membership: &Arc::Membership>>, + membership: &EpochMembership, view_number: TYPES::View, epoch_number: Option, payload: &TYPES::BlockPayload, @@ -335,7 +330,7 @@ pub async fn build_vid_proposal( ) { let vid_disperse = VidDisperse::calculate_vid_disperse::( payload, - membership, + &membership.coordinator, view_number, epoch_number, epoch_number, @@ -369,7 +364,7 @@ pub async fn build_vid_proposal( #[allow(clippy::too_many_arguments)] pub async fn build_da_certificate( - membership: &Arc::Membership>>, + membership: &EpochMembership, view_number: TYPES::View, epoch_number: Option, transactions: Vec, @@ -377,29 +372,27 @@ pub async fn build_da_certificate( public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, -) -> DaCertificate2 { +) -> anyhow::Result> { let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = vid_commitment::( &encoded_transactions, &metadata.encode(), - membership.read().await.total_nodes(epoch_number), + membership.total_nodes().await, upgrade_lock.version_infallible(view_number).await, ); - let next_epoch_da_payload_commitment = if upgrade_lock.epochs_enabled(view_number).await { - Some(vid_commitment::( - &encoded_transactions, - &metadata.encode(), - membership - .read() - .await - .total_nodes(epoch_number.map(|e| e + 1)), - upgrade_lock.version_infallible(view_number).await, - )) - } else { - None - }; + let next_epoch_da_payload_commitment = + if upgrade_lock.epochs_enabled(view_number).await && membership.epoch().is_some() { + Some(vid_commitment::( + &encoded_transactions, + &metadata.encode(), + membership.next_epoch().await?.total_nodes().await, + upgrade_lock.version_infallible(view_number).await, + )) + } else { + None + }; let da_data = DaData2 { payload_commit: da_payload_commitment, @@ -407,16 +400,17 @@ pub async fn build_da_certificate( epoch: epoch_number, }; - build_cert::, DaVote2, DaCertificate2>( - da_data, - membership, - view_number, - epoch_number, - public_key, - private_key, - upgrade_lock, + anyhow::Ok( + build_cert::, DaVote2, DaCertificate2>( + da_data, + membership, + view_number, + public_key, + private_key, + upgrade_lock, + ) + .await, ) - .await } /// This function permutes the provided input vector `inputs`, given some order provided within the diff --git a/hotshot-testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs index 55b51949f9..c13fe23d6b 100644 --- a/hotshot-testing/src/spinning_task.rs +++ b/hotshot-testing/src/spinning_task.rs @@ -243,7 +243,7 @@ where }; let storage = node.handle.storage().clone(); - let memberships = Arc::clone(&node.handle.memberships); + let memberships = node.handle.membership_coordinator.clone(); let config = node.handle.hotshot.config.clone(); let marketplace_config = node.handle.hotshot.marketplace_config.clone(); @@ -297,7 +297,7 @@ where TestRunner::::add_node_with_config_and_channels( node_id, generated_network.clone(), - memberships, + Arc::clone(memberships.membership()), initializer, config, validator_config, diff --git a/hotshot-testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs index 0d9d5d861f..408c259360 100644 --- a/hotshot-testing/src/test_builder.rs +++ 
b/hotshot-testing/src/test_builder.rs @@ -19,6 +19,7 @@ use hotshot_types::traits::node_implementation::ConsensusTime; use hotshot_types::{ consensus::ConsensusMetricsValue, drb::INITIAL_DRB_RESULT, + epoch_membership::EpochMembershipCoordinator, traits::node_implementation::{NodeType, Versions}, HotShotConfig, PeerConfig, ValidatorConfig, }; @@ -260,6 +261,7 @@ pub async fn create_test_handle< // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let membership_coordinator = EpochMembershipCoordinator::new(memberships, config.epoch_height); let behaviour = (metadata.behaviour)(node_id); match behaviour { @@ -271,7 +273,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -290,7 +292,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), @@ -305,7 +307,7 @@ pub async fn create_test_handle< private_key, node_id, config, - memberships, + membership_coordinator, network, initializer, ConsensusMetricsValue::default(), diff --git a/hotshot-testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs index dd6cf62661..a07cb509c8 100644 --- a/hotshot-testing/src/test_runner.rs +++ b/hotshot-testing/src/test_runner.rs @@ -27,6 +27,7 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, constants::EVENT_CHANNEL_SIZE, data::Leaf2, + epoch_membership::EpochMembershipCoordinator, simple_certificate::QuorumCertificate2, traits::{ election::Membership, @@ -596,13 +597,14 @@ where // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let epoch_height = config.epoch_height; SystemContext::new( public_key, private_key, node_id, config, - Arc::new(RwLock::new(memberships)), + EpochMembershipCoordinator::new(Arc::new(RwLock::new(memberships)), epoch_height), network, initializer, ConsensusMetricsValue::default(), @@ -634,13 +636,14 @@ where // Get key pair for certificate aggregation let private_key = validator_config.private_key.clone(); let public_key = validator_config.public_key.clone(); + let epoch_height = config.epoch_height; SystemContext::new_from_channels( public_key, private_key, node_id, config, - memberships, + EpochMembershipCoordinator::new(memberships, epoch_height), network, initializer, ConsensusMetricsValue::default(), diff --git a/hotshot-testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs index 5b55391461..f46d4b1101 100644 --- a/hotshot-testing/src/view_generator.rs +++ b/hotshot-testing/src/view_generator.rs @@ -12,7 +12,6 @@ use std::{ task::{Context, Poll}, }; -use async_lock::RwLock; use committable::Committable; use futures::{FutureExt, Stream}; use hotshot::types::{BLSPubKey, SignatureKey, SystemContextHandle}; @@ -26,6 +25,7 @@ use hotshot_types::{ DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, VidDisperse, VidDisperseShare, ViewChangeEvidence2, ViewNumber, }, + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, message::{Proposal, UpgradeLock}, simple_certificate::{ DaCertificate2, QuorumCertificate2, TimeoutCertificate2, UpgradeCertificate, @@ -37,7 +37,6 @@ use hotshot_types::{ }, traits::{ consensus_api::ConsensusApi, - election::Membership, node_implementation::{ConsensusTime, NodeType, 
Versions}, BlockPayload, }, @@ -57,7 +56,7 @@ pub struct TestView { pub leaf: Leaf2, pub view_number: ViewNumber, pub epoch_number: Option, - pub membership: Arc::Membership>>, + pub membership: EpochMembershipCoordinator, pub node_key_map: Arc, pub vid_disperse: Proposal>, pub vid_proposal: ( @@ -76,19 +75,17 @@ pub struct TestView { impl TestView { async fn find_leader_key_pair( - membership: &Arc::Membership>>, + membership: &EpochMembership, node_key_map: &Arc, view_number: ::View, - epoch: Option<::Epoch>, ) -> ( <::SignatureKey as SignatureKey>::PrivateKey, ::SignatureKey, ) { - let membership_reader = membership.read().await; - let leader = membership_reader - .leader(view_number, epoch) + let leader = membership + .leader(view_number) + .await .expect("expected Membership::leader to succeed"); - drop(membership_reader); let sk = node_key_map .get(&leader) @@ -98,7 +95,7 @@ impl TestView { } pub async fn genesis( - membership: &Arc::Membership>>, + membership: &EpochMembershipCoordinator, node_key_map: Arc, ) -> Self { let genesis_view = ViewNumber::new(1); @@ -120,27 +117,28 @@ impl TestView { &block_payload, &metadata, ); - + let epoch_membership = membership + .membership_for_epoch(genesis_epoch) + .await + .unwrap(); //let (private_key, public_key) = key_pair_for_id::(*genesis_view); let (private_key, public_key) = - Self::find_leader_key_pair(membership, &node_key_map, genesis_view, genesis_epoch) - .await; + Self::find_leader_key_pair(&epoch_membership, &node_key_map, genesis_view).await; let leader_public_key = public_key; let genesis_version = upgrade_lock.version_infallible(genesis_view).await; let payload_commitment = da_payload_commitment::( - membership, + &epoch_membership, transactions.clone(), &metadata, - genesis_epoch, genesis_version, ) .await; let (vid_disperse, vid_proposal) = build_vid_proposal::( - membership, + &epoch_membership, genesis_view, genesis_epoch, &block_payload, @@ -151,7 +149,7 @@ impl TestView { .await; let da_certificate = build_da_certificate( - membership, + &epoch_membership, genesis_view, genesis_epoch, transactions.clone(), @@ -160,7 +158,8 @@ impl TestView { &private_key, &upgrade_lock, ) - .await; + .await + .unwrap(); let block_header = TestBlockHeader::new( &Leaf2::::genesis::( @@ -258,8 +257,6 @@ impl TestView { // test view here. 
let next_view = max(old_view, self.view_number) + 1; - let membership = &self.membership; - let transactions = &self.transactions; let quorum_data = QuorumData2 { @@ -268,16 +265,26 @@ impl TestView { }; //let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); - let (old_private_key, old_public_key) = - Self::find_leader_key_pair(&self.membership, &self.node_key_map, old_view, old_epoch) - .await; + let (old_private_key, old_public_key) = Self::find_leader_key_pair( + &self + .membership + .membership_for_epoch(old_epoch) + .await + .unwrap(), + &self.node_key_map, + old_view, + ) + .await; //let (private_key, public_key) = key_pair_for_id::(*next_view); let (private_key, public_key) = Self::find_leader_key_pair( - &self.membership, + &self + .membership + .membership_for_epoch(self.epoch_number) + .await + .unwrap(), &self.node_key_map, next_view, - self.epoch_number, ) .await; @@ -297,17 +304,21 @@ impl TestView { ); let version = self.upgrade_lock.version_infallible(next_view).await; + let membership = self + .membership + .membership_for_epoch(self.epoch_number) + .await + .unwrap(); let payload_commitment = da_payload_commitment::( - membership, + &membership, transactions.clone(), &metadata, - self.epoch_number, version, ) .await; let (vid_disperse, vid_proposal) = build_vid_proposal::( - membership, + &membership, next_view, self.epoch_number, &block_payload, @@ -318,7 +329,7 @@ impl TestView { .await; let da_certificate = build_da_certificate::( - membership, + &membership, next_view, self.epoch_number, transactions.clone(), @@ -327,7 +338,8 @@ impl TestView { &private_key, &self.upgrade_lock, ) - .await; + .await + .unwrap(); let quorum_certificate = build_cert::< TestTypes, @@ -337,9 +349,8 @@ impl TestView { QuorumCertificate2, >( quorum_data, - membership, + &membership, old_view, - self.epoch_number, &old_public_key, &old_private_key, &self.upgrade_lock, @@ -355,9 +366,8 @@ impl TestView { UpgradeCertificate, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -378,9 +388,8 @@ impl TestView { ViewSyncFinalizeCertificate2, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -401,9 +410,8 @@ impl TestView { TimeoutCertificate2, >( data.clone(), - membership, + &membership, next_view, - self.epoch_number, &public_key, &private_key, &self.upgrade_lock, @@ -562,14 +570,14 @@ impl TestView { pub struct TestViewGenerator { pub current_view: Option, - pub membership: Arc::Membership>>, + pub membership: EpochMembershipCoordinator, pub node_key_map: Arc, pub _pd: PhantomData, } impl TestViewGenerator { pub fn generate( - membership: Arc::Membership>>, + membership: EpochMembershipCoordinator, node_key_map: Arc, ) -> Self { TestViewGenerator { @@ -653,14 +661,14 @@ impl Stream for TestViewGenerator { type Item = TestView; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mem = Arc::clone(&self.membership); + let epoch_membership = self.membership.clone(); let nkm = Arc::clone(&self.node_key_map); let curr_view = &self.current_view.clone(); let mut fut = if let Some(ref view) = curr_view { async move { TestView::next_view(view).await }.boxed() } else { - async move { TestView::genesis::(&mem, nkm).await }.boxed() + async move { TestView::genesis::(&epoch_membership, nkm).await }.boxed() }; match fut.as_mut().poll(cx) { diff --git a/hotshot-testing/tests/tests_1/da_task.rs 
b/hotshot-testing/tests/tests_1/da_task.rs index e01a2f0f7c..28eb3edc58 100644 --- a/hotshot-testing/tests/tests_1/da_task.rs +++ b/hotshot-testing/tests/tests_1/da_task.rs @@ -24,10 +24,7 @@ use hotshot_testing::{ use hotshot_types::{ data::{null_block, PackedBundle, ViewNumber}, simple_vote::DaData2, - traits::{ - election::Membership, - node_implementation::{ConsensusTime, Versions}, - }, + traits::node_implementation::{ConsensusTime, Versions}, }; use vbs::version::{StaticVersionType, Version}; @@ -38,7 +35,7 @@ async fn test_da_task() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the @@ -48,7 +45,11 @@ async fn test_da_task() { let payload_commit = hotshot_types::data::vid_commitment::( &encoded_transactions, &[], - handle.hotshot.memberships.read().await.total_nodes(None), + membership + .membership_for_epoch(None) + .await.unwrap() + .total_nodes() + .await, default_version, ); @@ -149,7 +150,7 @@ async fn test_da_task_storage_failure() { // Set the error flag here for the system handle. This causes it to emit an error on append. handle.storage().write().await.should_return_err = true; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let default_version = Version { major: 0, minor: 0 }; // Make some empty encoded transactions, we just care about having a commitment handy for the @@ -159,12 +160,15 @@ async fn test_da_task_storage_failure() { let payload_commit = hotshot_types::data::vid_commitment::( &encoded_transactions, &[], - handle.hotshot.memberships.read().await.total_nodes(None), + membership + .membership_for_epoch(None) + .await.unwrap() + .total_nodes() + .await, default_version, ); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/message.rs b/hotshot-testing/tests/tests_1/message.rs index 568b2906f4..5adc897336 100644 --- a/hotshot-testing/tests/tests_1/message.rs +++ b/hotshot-testing/tests/tests_1/message.rs @@ -6,7 +6,6 @@ #[cfg(test)] use std::marker::PhantomData; -use std::sync::Arc; use committable::Committable; use hotshot_example_types::node_types::TestTypes; @@ -69,19 +68,19 @@ async fn test_certificate2_validity() { use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ data::{Leaf, Leaf2}, - traits::election::Membership, vote::Certificate, }; hotshot::helpers::initialize_logging(); let node_id = 1; + let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -104,10 +103,9 @@ async fn test_certificate2_validity() { let qc2 = proposal.data.justify_qc().clone(); let qc = qc2.clone().to_qc(); - let membership_reader = membership.read().await; - let 
membership_stake_table = membership_reader.stake_table(None); - let membership_success_threshold = membership_reader.success_threshold(None); - drop(membership_reader); + let epoch_mem = membership.membership_for_epoch(None).await.unwrap(); + let membership_stake_table = epoch_mem.stake_table().await; + let membership_success_threshold = epoch_mem.success_threshold().await; assert!(qc .is_valid_cert( diff --git a/hotshot-testing/tests/tests_1/network_task.rs b/hotshot-testing/tests/tests_1/network_task.rs index ef3584a55e..78abf7cf57 100644 --- a/hotshot-testing/tests/tests_1/network_task.rs +++ b/hotshot-testing/tests/tests_1/network_task.rs @@ -36,6 +36,7 @@ async fn test_network_task() { use std::collections::BTreeMap; use futures::StreamExt; + use hotshot_types::epoch_membership::EpochMembershipCoordinator; hotshot::helpers::initialize_logging(); @@ -61,12 +62,13 @@ async fn test_network_task() { all_nodes.clone(), all_nodes, ))); + let coordinator = EpochMembershipCoordinator::new(membership, config.epoch_height); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: None, - membership: Arc::clone(&membership), + membership_coordinator: coordinator.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -79,7 +81,7 @@ async fn test_network_task() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::::generate(membership, node_key_map); + let mut generator = TestViewGenerator::::generate(coordinator, node_key_map); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal) = async_broadcast::broadcast(10); @@ -208,6 +210,7 @@ async fn test_network_storage_fail() { use std::collections::BTreeMap; use futures::StreamExt; + use hotshot_types::epoch_membership::EpochMembershipCoordinator; hotshot::helpers::initialize_logging(); @@ -233,12 +236,13 @@ async fn test_network_storage_fail() { all_nodes.clone(), all_nodes, ))); + let coordinator = EpochMembershipCoordinator::new(membership, config.epoch_height); let network_state: NetworkEventTaskState, _> = NetworkEventTaskState { network: network.clone(), view: ViewNumber::new(0), epoch: None, - membership: Arc::clone(&membership), + membership_coordinator: coordinator.clone(), upgrade_lock: upgrade_lock.clone(), storage, consensus, @@ -251,7 +255,7 @@ async fn test_network_storage_fail() { let task = Task::new(network_state, tx.clone(), rx); task_reg.run_task(task); - let mut generator = TestViewGenerator::::generate(membership, node_key_map); + let mut generator = TestViewGenerator::::generate(coordinator, node_key_map); let view = generator.next().await.unwrap(); let (out_tx_internal, mut out_rx_internal): (Sender>>, _) = diff --git a/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs index d7cd877ddc..73642e0d44 100644 --- a/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_proposal_recv_task.rs @@ -53,7 +53,7 @@ async fn test_quorum_proposal_recv_task() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; @@ -127,7 +127,7 @@ async fn test_quorum_proposal_recv_task_liveness_check() { let (handle, _, _, 
node_key_map) = build_system_handle::(4).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; diff --git a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs index 58a8c6f568..55c79c0c25 100644 --- a/hotshot-testing/tests/tests_1/quorum_proposal_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_proposal_task.rs @@ -47,7 +47,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -55,15 +58,13 @@ async fn test_quorum_proposal_task_quorum_proposal_view_1() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -147,7 +148,10 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); @@ -202,9 +206,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -223,9 +226,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -242,9 +244,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, @@ -261,9 +262,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(4), - None, version_4, ) .await, @@ -280,9 +280,8 @@ async fn test_quorum_proposal_task_quorum_proposal_view_gt_1() { Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(5), - None, version_5, ) .await, @@ -324,9 +323,13 @@ async fn test_quorum_proposal_task_qc_timeout() { hotshot::helpers::initialize_logging(); let node_id = 3; + let (handle, _, _, node_key_map) = 
build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -334,16 +337,14 @@ async fn test_quorum_proposal_task_qc_timeout() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -421,7 +422,10 @@ async fn test_quorum_proposal_task_view_sync() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); let version = handle .hotshot .upgrade_lock @@ -429,16 +433,14 @@ async fn test_quorum_proposal_task_view_sync() { .await; let payload_commitment = build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(node_id), - None, version, ) .await; let builder_commitment = BuilderCommitment::from_raw_digest(sha2::Sha256::new().finalize()); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -516,10 +518,12 @@ async fn test_quorum_proposal_task_liveness_check() { let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); let mut proposals = Vec::new(); let mut leaders = Vec::new(); @@ -570,9 +574,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -591,9 +594,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -610,9 +612,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, @@ -629,9 +630,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[3].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(4), - None, version_4, ) .await, 
@@ -648,9 +648,8 @@ async fn test_quorum_proposal_task_liveness_check() { Qc2Formed(either::Left(proposals[4].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(5), - None, version_5, ) .await, @@ -690,8 +689,7 @@ async fn test_quorum_proposal_task_with_incomplete_events() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); - + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); let mut proposals = Vec::new(); diff --git a/hotshot-testing/tests/tests_1/quorum_vote_task.rs b/hotshot-testing/tests/tests_1/quorum_vote_task.rs index ae8c2bc9ad..170eaa363c 100644 --- a/hotshot-testing/tests/tests_1/quorum_vote_task.rs +++ b/hotshot-testing/tests/tests_1/quorum_vote_task.rs @@ -44,7 +44,7 @@ async fn test_quorum_vote_task_success() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); @@ -110,7 +110,7 @@ async fn test_quorum_vote_task_miss_dependency() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); @@ -193,7 +193,7 @@ async fn test_quorum_vote_task_incorrect_dependency() { let (handle, _, _, node_key_map) = build_system_handle::(2).await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); diff --git a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs index 00f970a40b..d8533acee2 100644 --- a/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs +++ b/hotshot-testing/tests/tests_1/upgrade_task_with_proposal.rs @@ -82,10 +82,12 @@ async fn test_upgrade_task_with_proposal() { let consensus = handle.hotshot.consensus(); let mut consensus_writer = consensus.write().await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let epoch_1_mem = membership + .membership_for_epoch(Some(EpochNumber::new(1))) + .await.unwrap(); - let mut generator = - TestViewGenerator::::generate(Arc::clone(&membership), node_key_map); + let mut generator = TestViewGenerator::::generate(membership.clone(), node_key_map); for view in (&mut generator).take(1).collect::>().await { proposals.push(view.quorum_proposal.clone()); @@ -157,9 +159,8 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(genesis_cert.clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(1), - None, version_1, ) .await, @@ -178,9 +179,8 @@ async fn test_upgrade_task_with_proposal() { Qc2Formed(either::Left(proposals[1].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(2), - None, version_2, ) .await, @@ -198,9 +198,8 @@ async fn test_upgrade_task_with_proposal() { 
Qc2Formed(either::Left(proposals[2].data.justify_qc().clone())), SendPayloadCommitmentAndMetadata( build_payload_commitment::( - &membership, + &epoch_1_mem, ViewNumber::new(3), - None, version_3, ) .await, diff --git a/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs b/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs index 4047f95607..2597ba3690 100644 --- a/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs +++ b/hotshot-testing/tests/tests_1/upgrade_task_with_vote.rs @@ -69,7 +69,7 @@ async fn test_upgrade_task_with_vote() { let consensus = handle.hotshot.consensus().clone(); let mut consensus_writer = consensus.write().await; - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); for view in (&mut generator).take(2).collect::>().await { diff --git a/hotshot-testing/tests/tests_1/vid_task.rs b/hotshot-testing/tests/tests_1/vid_task.rs index cfde747d7d..b5f7520888 100644 --- a/hotshot-testing/tests/tests_1/vid_task.rs +++ b/hotshot-testing/tests/tests_1/vid_task.rs @@ -44,8 +44,16 @@ async fn test_vid_task() { .0; let pub_key = handle.public_key(); - let membership = Arc::clone(&handle.hotshot.memberships); + let membership = handle.hotshot.membership_coordinator.clone(); + let default_version = Version { major: 0, minor: 0 }; + + let mut vid = vid_scheme_from_view_number::( + &membership.membership_for_epoch(None).await.unwrap(), + ViewNumber::new(0), + default_version, + ) + .await; let upgrade_lock = UpgradeLock::::new(); let transactions = vec![TestTransaction::new(vec![0])]; diff --git a/hotshot-testing/tests/tests_1/vote_dependency_handle.rs b/hotshot-testing/tests/tests_1/vote_dependency_handle.rs index e6e40f2b8c..447639c3da 100644 --- a/hotshot-testing/tests/tests_1/vote_dependency_handle.rs +++ b/hotshot-testing/tests/tests_1/vote_dependency_handle.rs @@ -35,8 +35,7 @@ async fn test_vote_dependency_handle() { // Construct the system handle for the node ID to build all of the state objects. 
let (handle, _, _, node_key_map) = build_system_handle::(node_id).await; - let membership = Arc::clone(&handle.hotshot.memberships); - + let membership = handle.hotshot.membership_coordinator.clone(); let mut generator = TestViewGenerator::::generate(membership, node_key_map); // Generate our state for the test @@ -89,7 +88,7 @@ async fn test_vote_dependency_handle() { consensus: OuterConsensus::new(consensus.clone()), consensus_metrics: Arc::clone(&consensus.read().await.metrics), instance_state: handle.hotshot.instance_state(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), view_number, sender: event_sender.clone(), diff --git a/hotshot-types/Cargo.toml b/hotshot-types/Cargo.toml index e236b4acc3..e65ea80623 100644 --- a/hotshot-types/Cargo.toml +++ b/hotshot-types/Cargo.toml @@ -14,6 +14,7 @@ ark-ff = { workspace = true } ark-serialize = { workspace = true } ark-srs = { version = "0.3.1" } ark-std = { workspace = true } +async-broadcast = { workspace = true } async-lock = { workspace = true } async-trait = { workspace = true } bincode = { workspace = true } diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index c775880206..0849b4d4d9 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -23,6 +23,7 @@ pub use crate::utils::{View, ViewInner}; use crate::{ data::{Leaf2, QuorumProposalWrapper, VidCommitment, VidDisperse, VidDisperseShare}, drb::DrbSeedsAndResults, + epoch_membership::EpochMembershipCoordinator, error::HotShotError, event::{HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, @@ -962,7 +963,7 @@ impl Consensus { consensus: OuterConsensus, view: ::View, target_epoch: Option<::Epoch>, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, ) -> Option<()> { @@ -977,7 +978,7 @@ impl Consensus { let vid = VidDisperse::calculate_vid_disperse::( &payload_with_metadata.payload, - &membership, + &membership_coordinator, view, target_epoch, epoch, diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index e61063227f..3deb60f5dd 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -31,6 +31,7 @@ use vid_disperse::{ADVZDisperse, ADVZDisperseShare, AvidMDisperse, VidDisperseSh use crate::{ drb::DrbResult, + epoch_membership::EpochMembershipCoordinator, impl_has_epoch, impl_has_none_epoch, message::{convert_proposal, Proposal, UpgradeLock}, simple_certificate::{ @@ -428,7 +429,7 @@ impl VidDisperse { #[allow(clippy::panic)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, diff --git a/hotshot-types/src/data/vid_disperse.rs b/hotshot-types/src/data/vid_disperse.rs index 0e21767906..f7c90e5c68 100644 --- a/hotshot-types/src/data/vid_disperse.rs +++ b/hotshot-types/src/data/vid_disperse.rs @@ -6,21 +6,21 @@ //! This module provides types for VID disperse related data structures. 
-use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; +use std::{collections::BTreeMap, fmt::Debug, hash::Hash, marker::PhantomData}; -use async_lock::RwLock; use hotshot_utils::anytrace::*; use jf_vid::{VidDisperse as JfVidDisperse, VidScheme}; use serde::{Deserialize, Serialize}; use tokio::task::spawn_blocking; use crate::{ + epoch_membership::{EpochMembership, EpochMembershipCoordinator}, impl_has_epoch, message::Proposal, simple_vote::HasEpoch, traits::{ - block_contents::EncodeBytes, election::Membership, node_implementation::NodeType, - signature_key::SignatureKey, BlockPayload, + block_contents::EncodeBytes, node_implementation::NodeType, signature_key::SignatureKey, + BlockPayload, }, vid::{ advz::{advz_scheme, ADVZCommitment, ADVZCommon, ADVZScheme, ADVZShare}, @@ -67,14 +67,16 @@ impl ADVZDisperse { async fn from_membership( view_number: TYPES::View, mut vid_disperse: JfVidDisperse, - membership: &Arc>, + membership: &EpochMembershipCoordinator, target_epoch: Option, data_epoch: Option, ) -> Self { let shares = membership - .read() + .membership_for_epoch(target_epoch) + .await + .unwrap() + .committee_members(view_number) .await - .committee_members(view_number, target_epoch) .iter() .map(|node| (node.clone(), vid_disperse.shares.remove(0))) .collect(); @@ -97,12 +99,17 @@ impl ADVZDisperse { #[allow(clippy::panic)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, ) -> Result { - let num_nodes = membership.read().await.total_nodes(target_epoch); + let num_nodes = membership + .membership_for_epoch(target_epoch) + .await? + .total_nodes() + .await; + let txns = payload.encode(); let vid_disperse = spawn_blocking(move || advz_scheme(num_nodes).disperse(&txns)) @@ -274,15 +281,14 @@ impl AvidMDisperse { view_number: TYPES::View, commit: AvidMCommitment, shares: &[AvidMShare], - membership: &Arc>, + membership: &EpochMembership, target_epoch: Option, data_epoch: Option, ) -> Self { let payload_byte_len = shares[0].payload_byte_len(); let shares = membership - .read() + .committee_members(view_number) .await - .committee_members(view_number, target_epoch) .iter() .zip(shares) .map(|(node, share)| (node.clone(), share.clone())) @@ -307,13 +313,14 @@ impl AvidMDisperse { #[allow(clippy::single_range_in_vec_init)] pub async fn calculate_vid_disperse( payload: &TYPES::BlockPayload, - membership: &Arc>, + membership: &EpochMembershipCoordinator, view: TYPES::View, target_epoch: Option, data_epoch: Option, metadata: &>::Metadata, ) -> Result { - let num_nodes = membership.read().await.total_nodes(target_epoch); + let target_mem = membership.membership_for_epoch(target_epoch).await?; + let num_nodes = target_mem.total_nodes().await; let txns = payload.encode(); let num_txns = txns.len(); @@ -333,7 +340,7 @@ impl AvidMDisperse { .context(|err| error!("Failed to calculate VID disperse. 
Error: {}", err))?; Ok( - Self::from_membership(view, commit, &shares, membership, target_epoch, data_epoch) + Self::from_membership(view, commit, &shares, &target_mem, target_epoch, data_epoch) .await, ) } diff --git a/hotshot-types/src/epoch_membership.rs b/hotshot-types/src/epoch_membership.rs new file mode 100644 index 0000000000..00001fcb8f --- /dev/null +++ b/hotshot-types/src/epoch_membership.rs @@ -0,0 +1,425 @@ +use std::collections::BTreeSet; +use std::num::NonZeroU64; +use std::{collections::HashMap, sync::Arc}; + +use async_broadcast::{broadcast, InactiveReceiver}; +use async_lock::{Mutex, RwLock}; +use hotshot_utils::anytrace::{self, Error, Level, Result, DEFAULT_LOG_LEVEL}; +use hotshot_utils::{ensure, line_info, log, warn}; + +use crate::drb::DrbResult; +use crate::traits::election::Membership; +use crate::traits::node_implementation::{ConsensusTime, NodeType}; +use crate::utils::root_block_in_epoch; +use crate::PeerConfig; + +type EpochMap = + HashMap<::Epoch, InactiveReceiver>>>; + +/// Struct to Coordinate membership catchup +pub struct EpochMembershipCoordinator { + /// The underlying membhersip + membership: Arc>, + + /// Any in progress attempts at catching up are stored in this map + /// Any new callers wantin an `EpochMembership` will await on the signal + /// alerting them the membership is ready. The first caller for an epoch will + /// wait for the actual catchup and allert future callers when it's done + catchup_map: Arc>>, + + /// Number of blocks in an epoch + pub epoch_height: u64, +} + +impl Clone for EpochMembershipCoordinator { + fn clone(&self) -> Self { + Self { + membership: Arc::clone(&self.membership), + catchup_map: Arc::clone(&self.catchup_map), + epoch_height: self.epoch_height, + } + } +} +// async fn catchup_membership(coordinator: EpochMembershipCoordinator) { + +// } + +impl EpochMembershipCoordinator +where + Self: Send, +{ + /// Create an EpochMembershipCoordinator + pub fn new(membership: Arc>, epoch_height: u64) -> Self { + Self { + membership, + catchup_map: Arc::default(), + epoch_height, + } + } + + /// Get a reference to the membership + #[must_use] + pub fn membership(&self) -> &Arc> { + &self.membership + } + + /// Get a Membership for a given Epoch, which is guaranteed to have a stake + /// table for the given Epoch + pub async fn membership_for_epoch( + &self, + maybe_epoch: Option, + ) -> Result> { + let ret_val = EpochMembership { + epoch: maybe_epoch, + coordinator: self.clone(), + }; + let Some(epoch) = maybe_epoch else { + return Ok(ret_val); + }; + if self.membership.read().await.has_epoch(epoch) { + return Ok(ret_val); + } + if self.catchup_map.lock().await.contains_key(&epoch) { + return Err(warn!( + "Stake table for Epoch {:?} Unavailable. Catch up already in Progress", + epoch + )); + } + let coordinator = self.clone(); + spawn_catchup(coordinator, epoch); + + Err(warn!( + "Stake table for Epoch {:?} Unavailable. Starting catchup", + epoch + )) + } + + /// Catches the membership up to the epoch passed as an argument. + /// To do this try to get the stake table for the epoch containing this epoch's root + /// if the root does not exist recursively catchup until you've found it + /// + /// If there is another catchup in progress this will not duplicate efforts + /// e.g. 
+
+    /// Catches the membership up to the epoch passed as an argument.
+    /// To do this, try to get the stake table for the epoch containing this epoch's root;
+    /// if the root does not exist, recursively catch up until you've found it.
+    ///
+    /// If there is another catchup in progress this will not duplicate efforts;
+    /// e.g. if we start with only the epoch 0 stake table and call catchup for epoch 10,
+    /// then call catchup for epoch 20, the first caller will actually do the work to catch
+    /// up to epoch 10, then the second caller will continue catching up to epoch 20.
+    async fn catchup(self, epoch: TYPES::Epoch) -> Result<EpochMembership<TYPES>> {
+        // recursively catchup until we have a stake table for the epoch containing our root
+        ensure!(
+            *epoch != 0 && *epoch != 1,
+            "We are trying to catch up to epoch 0 or 1! This means the initial stake table is missing!"
+        );
+        let root_epoch = TYPES::Epoch::new(*epoch - 2);
+
+        let root_membership = if self.membership.read().await.has_epoch(root_epoch) {
+            EpochMembership {
+                epoch: Some(root_epoch),
+                coordinator: self.clone(),
+            }
+        } else {
+            Box::pin(self.wait_for_catchup(root_epoch)).await?
+        };
+
+        // Get the epoch root header and update our membership with it, then sync.
+        // Verification of the root is handled in get_epoch_root.
+        let (next_epoch, header) = root_membership
+            .get_epoch_root(root_block_in_epoch(*root_epoch, self.epoch_height))
+            .await
+            .ok_or(anytrace::warn!("get epoch root failed"))?;
+        let updater = self
+            .membership
+            .read()
+            .await
+            .add_epoch_root(next_epoch, header)
+            .await
+            .ok_or(anytrace::warn!("add epoch root failed"))?;
+        updater(&mut *(self.membership.write().await));
+
+        Ok(EpochMembership {
+            epoch: Some(epoch),
+            coordinator: self.clone(),
+        })
+    }
+
+    /// Wait for an in-progress catchup of `epoch` to finish, starting one
+    /// ourselves if none is running.
+    pub async fn wait_for_catchup(&self, epoch: TYPES::Epoch) -> Result<EpochMembership<TYPES>> {
+        let Some(mut rx) = self
+            .catchup_map
+            .lock()
+            .await
+            .get(&epoch)
+            .map(InactiveReceiver::activate_cloned)
+        else {
+            return self.clone().catchup(epoch).await;
+        };
+        let Ok(Ok(mem)) = rx.recv_direct().await else {
+            return self.clone().catchup(epoch).await;
+        };
+        Ok(mem)
+    }
+}
+
+fn spawn_catchup<T: NodeType>(coordinator: EpochMembershipCoordinator<T>, epoch: T::Epoch) {
+    tokio::spawn(async move {
+        let tx = {
+            let mut map = coordinator.catchup_map.lock().await;
+            if map.contains_key(&epoch) {
+                return;
+            }
+            let (tx, rx) = broadcast(1);
+            map.insert(epoch, rx.deactivate());
+            tx
+        };
+        // do catchup, then broadcast the result to any waiters
+        let ret = coordinator.catchup(epoch).await;
+        let _ = tx.broadcast_direct(ret).await;
+    });
+}
+
+/// Wrapper around a membership that guarantees that the epoch
+/// has a stake table
+pub struct EpochMembership<TYPES: NodeType> {
+    /// Epoch the `membership` is guaranteed to have a stake table for
+    pub epoch: Option<TYPES::Epoch>,
+    /// Coordinator the membership was fetched from
+    pub coordinator: EpochMembershipCoordinator<TYPES>,
+}
+
+impl<TYPES: NodeType> Clone for EpochMembership<TYPES> {
+    fn clone(&self) -> Self {
+        Self {
+            coordinator: self.coordinator.clone(),
+            epoch: self.epoch,
+        }
+    }
+}
+
+impl<TYPES: NodeType> EpochMembership<TYPES> {
+    /// Get the epoch this membership is good for
+    pub fn epoch(&self) -> Option<TYPES::Epoch> {
+        self.epoch
+    }
+
+    /// Get a membership for the next epoch
+    pub async fn next_epoch(&self) -> Result<Self> {
+        ensure!(
+            self.epoch().is_some(),
+            "No next epoch because epoch is None"
+        );
+        self.coordinator
+            .membership_for_epoch(self.epoch.map(|e| e + 1))
+            .await
+    }
+
+    /// Get a membership for the given epoch
+    pub async fn get_new_epoch(&self, epoch: Option<TYPES::Epoch>) -> Result<Self> {
+        self.coordinator.membership_for_epoch(epoch).await
+    }
+
+    /// Wraps the same-named Membership trait fn
+    async fn get_epoch_root(
+        &self,
+        block_height: u64,
+    ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .get_epoch_root(block_height)
+            .await
+    }
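To make the recursion in `catchup` above concrete: each missing epoch is reached through the root stored two epochs earlier, bottoming out at the genesis stake tables. A standalone illustration of the fetch order (a hypothetical helper, not part of the patch):

```rust
/// Illustration only: the epochs `catchup` fetches, oldest first, to reach
/// `target`, assuming only the genesis stake tables (epochs 0 and 1) exist.
/// The root of epoch e lives in epoch e - 2, so the walk descends by two
/// until it reaches an epoch the node already has.
fn catchup_chain(target: u64) -> Vec<u64> {
    assert!(target >= 2, "epochs 0 and 1 must exist at genesis");
    let mut chain = Vec::new();
    let mut epoch = target;
    while epoch >= 2 {
        chain.push(epoch);
        epoch -= 2;
    }
    chain.reverse();
    chain // catchup_chain(10) == [2, 4, 6, 8, 10]; catchup_chain(5) == [3, 5]
}
```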
+
+    /// Get all participants in the committee (including their stake) for a specific epoch
+    pub async fn stake_table(&self) -> Vec<PeerConfig<TYPES::SignatureKey>> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .stake_table(self.epoch)
+    }
+
+    /// Get all participants in the DA committee (including their stake) for a specific epoch
+    pub async fn da_stake_table(&self) -> Vec<PeerConfig<TYPES::SignatureKey>> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .da_stake_table(self.epoch)
+    }
+
+    /// Get all participants in the committee for a specific view for a specific epoch
+    pub async fn committee_members(
+        &self,
+        view_number: TYPES::View,
+    ) -> BTreeSet<TYPES::SignatureKey> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .committee_members(view_number, self.epoch)
+    }
+
+    /// Get all participants in the DA committee for a specific view for a specific epoch
+    pub async fn da_committee_members(
+        &self,
+        view_number: TYPES::View,
+    ) -> BTreeSet<TYPES::SignatureKey> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .da_committee_members(view_number, self.epoch)
+    }
+
+    /// Get all leaders in the committee for a specific view for a specific epoch
+    pub async fn committee_leaders(
+        &self,
+        view_number: TYPES::View,
+    ) -> BTreeSet<TYPES::SignatureKey> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .committee_leaders(view_number, self.epoch)
+    }
+
+    /// Get the stake table entry for a public key, returns `None` if the
+    /// key is not in the table for a specific epoch
+    pub async fn stake(
+        &self,
+        pub_key: &TYPES::SignatureKey,
+    ) -> Option<PeerConfig<TYPES::SignatureKey>> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .stake(pub_key, self.epoch)
+    }
+
+    /// Get the DA stake table entry for a public key, returns `None` if the
+    /// key is not in the table for a specific epoch
+    pub async fn da_stake(
+        &self,
+        pub_key: &TYPES::SignatureKey,
+    ) -> Option<PeerConfig<TYPES::SignatureKey>> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .da_stake(pub_key, self.epoch)
+    }
+
+    /// See if a node has stake in the committee in a specific epoch
+    pub async fn has_stake(&self, pub_key: &TYPES::SignatureKey) -> bool {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .has_stake(pub_key, self.epoch)
+    }
+
+    /// See if a node has DA stake in the committee in a specific epoch
+    pub async fn has_da_stake(&self, pub_key: &TYPES::SignatureKey) -> bool {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .has_da_stake(pub_key, self.epoch)
+    }
+
+    /// The leader of the committee for view `view_number` in `epoch`.
+    ///
+    /// Note: this function uses a HotShot-internal error type; call
+    /// `lookup_leader` instead if you need the underlying `Membership` error.
+    ///
+    /// # Errors
+    /// Returns an error if the leader cannot be calculated.
+    pub async fn leader(&self, view: TYPES::View) -> Result<TYPES::SignatureKey> {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .leader(view, self.epoch)
+    }
+
+    /// The leader of the committee for view `view_number` in `epoch`.
+    ///
+    /// Note: There is no such thing as a DA leader, so any consumer
+    /// requiring a leader should call this.
+    ///
+    /// # Errors
+    /// Returns an error if the leader cannot be calculated.
+    pub async fn lookup_leader(
+        &self,
+        view: TYPES::View,
+    ) -> std::result::Result<
+        TYPES::SignatureKey,
+        <<TYPES as NodeType>::Membership as Membership<TYPES>>::Error,
+    > {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .lookup_leader(view, self.epoch)
+    }
+
+    /// Returns the total number of nodes in the committee for this epoch
+    pub async fn total_nodes(&self) -> usize {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .total_nodes(self.epoch)
+    }
+
+    /// Returns the total number of DA nodes in the committee for this epoch
+    pub async fn da_total_nodes(&self) -> usize {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .da_total_nodes(self.epoch)
+    }
+
+    /// Returns the success threshold for a specific `Membership` implementation
+    pub async fn success_threshold(&self) -> NonZeroU64 {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .success_threshold(self.epoch)
+    }
+
+    /// Returns the DA success threshold for a specific `Membership` implementation
+    pub async fn da_success_threshold(&self) -> NonZeroU64 {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .da_success_threshold(self.epoch)
+    }
+
+    /// Returns the failure threshold for a specific `Membership` implementation
+    pub async fn failure_threshold(&self) -> NonZeroU64 {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .failure_threshold(self.epoch)
+    }
+
+    /// Returns the threshold required to upgrade the network protocol
+    pub async fn upgrade_threshold(&self) -> NonZeroU64 {
+        self.coordinator
+            .membership
+            .read()
+            .await
+            .upgrade_threshold(self.epoch)
+    }
+
+    /// Add the DRB result for this membership's epoch
+    pub async fn add_drb_result(&self, drb_result: DrbResult) {
+        if let Some(epoch) = self.epoch() {
+            self.coordinator
+                .membership
+                .write()
+                .await
+                .add_drb_result(epoch, drb_result)
+        }
+    }
+}
diff --git a/hotshot-types/src/lib.rs b/hotshot-types/src/lib.rs
index 9066f14e08..eac8950a5e 100644
--- a/hotshot-types/src/lib.rs
+++ b/hotshot-types/src/lib.rs
@@ -22,6 +22,8 @@ pub mod constants;
 pub mod data;
 /// Holds the types and functions for DRB computation.
 pub mod drb;
+/// Epoch membership wrappers
+pub mod epoch_membership;
 pub mod error;
 pub mod event;
 /// Holds the configuration file specification for a HotShot node.
diff --git a/hotshot-types/src/message.rs b/hotshot-types/src/message.rs
index b2ecca7840..12e07efd87 100644
--- a/hotshot-types/src/message.rs
+++ b/hotshot-types/src/message.rs
@@ -30,6 +30,7 @@ use crate::{
         DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2,
         QuorumProposalWrapper, UpgradeProposal,
     },
+    epoch_membership::EpochMembership,
     request_response::ProposalRequestPayload,
     simple_certificate::{
         DaCertificate, DaCertificate2, NextEpochQuorumCertificate2, QuorumCertificate2,
@@ -43,13 +44,12 @@ use crate::{
         ViewSyncPreCommitVote, ViewSyncPreCommitVote2,
     },
     traits::{
-        block_contents::BlockHeader,
         election::Membership,
         network::{DataRequest, ResponseMessage, ViewMessage},
         node_implementation::{ConsensusTime, NodeType, Versions},
         signature_key::SignatureKey,
     },
-    utils::{mnemonic, option_epoch_from_block_number},
+    utils::mnemonic,
     vote::HasViewNumber,
 };
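For the hunk below, callers now resolve the proposal's epoch themselves and hand `validate_signature` an `EpochMembership` for it. A sketch of the resulting call pattern, mirroring the `hotshot/src/types/handle.rs` change later in this diff; the helper name and parameter list are mine, and the exact import paths are an assumption:

```rust
use hotshot_types::{
    data::QuorumProposalWrapper,
    epoch_membership::EpochMembershipCoordinator,
    message::Proposal,
    simple_vote::HasEpoch,
    traits::{block_contents::BlockHeader, node_implementation::NodeType},
    utils::option_epoch_from_block_number,
};
use hotshot_utils::anytrace::Result;

/// Hypothetical helper showing the new two-step validation.
async fn validate_quorum_proposal<TYPES: NodeType>(
    proposal: &Proposal<TYPES, QuorumProposalWrapper<TYPES>>,
    coordinator: &EpochMembershipCoordinator<TYPES>,
    epoch_height: u64,
) -> Result<()> {
    // Derive the epoch the proposal claims from its block height.
    let maybe_epoch = option_epoch_from_block_number::<TYPES>(
        proposal.data.proposal.epoch().is_some(),
        proposal.data.block_header().block_number(),
        epoch_height,
    );
    // Fails fast if the stake table for that epoch is still being fetched.
    let membership = coordinator.membership_for_epoch(maybe_epoch).await?;
    proposal.validate_signature(&membership).await
}
```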
@@ -588,18 +588,9 @@ where
     /// Checks that the signature of the quorum proposal is valid.
     /// # Errors
     /// Returns an error when the proposal signature is invalid.
-    pub fn validate_signature(
-        &self,
-        membership: &TYPES::Membership,
-        epoch_height: u64,
-    ) -> Result<()> {
+    pub async fn validate_signature(&self, membership: &EpochMembership<TYPES>) -> Result<()> {
         let view_number = self.data.proposal.view_number();
-        let proposal_epoch = option_epoch_from_block_number::<TYPES>(
-            self.data.proposal.epoch().is_some(),
-            self.data.block_header().block_number(),
-            epoch_height,
-        );
-        let view_leader_key = membership.leader(view_number, proposal_epoch)?;
+        let view_leader_key = membership.leader(view_number).await?;
         let proposed_leaf = Leaf2::from_quorum_proposal(&self.data);
 
         ensure!(
diff --git a/hotshot-types/src/simple_certificate.rs b/hotshot-types/src/simple_certificate.rs
index 82092027b5..2c9dbeaa63 100644
--- a/hotshot-types/src/simple_certificate.rs
+++ b/hotshot-types/src/simple_certificate.rs
@@ -8,6 +8,7 @@
 use std::{
     fmt::{self, Debug, Display, Formatter},
+    future::Future,
     hash::Hash,
     marker::PhantomData,
     num::NonZeroU64,
@@ -22,6 +23,7 @@ use serde::{Deserialize, Serialize};
 use crate::{
     data::serialize_signature2,
+    epoch_membership::EpochMembership,
     message::UpgradeLock,
     simple_vote::{
         DaData, DaData2, HasEpoch, NextEpochQuorumData2, QuorumData, QuorumData2, QuorumMarker,
@@ -30,7 +32,6 @@ use crate::{
         ViewSyncPreCommitData2, Voteable,
     },
     traits::{
-        election::Membership,
         node_implementation::{ConsensusTime, NodeType, Versions},
         signature_key::SignatureKey,
     },
@@ -41,10 +42,7 @@ use crate::{
 /// Trait which allows us to inject different threshold calculations into a Certificate type
 pub trait Threshold<TYPES: NodeType> {
     /// Calculate a threshold based on the membership
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64;
+    fn threshold(membership: &EpochMembership<TYPES>) -> impl Future<Output = u64> + Send;
 }
 
 /// Defines a threshold which is 2f + 1 (Amount needed for Quorum)
@@ -52,11 +50,8 @@ pub trait Threshold<TYPES: NodeType> {
 pub struct SuccessThreshold {}
 
 impl<TYPES: NodeType> Threshold<TYPES> for SuccessThreshold {
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        membership.success_threshold(epoch).into()
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        membership.success_threshold().await.into()
     }
 }
 
@@ -65,11 +60,8 @@ impl<TYPES: NodeType> Threshold<TYPES> for SuccessThreshold {
 pub struct OneHonestThreshold {}
 
 impl<TYPES: NodeType> Threshold<TYPES> for OneHonestThreshold {
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        membership.failure_threshold(epoch).into()
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        membership.failure_threshold().await.into()
     }
 }
 
@@ -78,11 +70,8 @@ impl<TYPES: NodeType> Threshold<TYPES> for OneHonestThreshold {
 pub struct UpgradeThreshold {}
 
 impl<TYPES: NodeType> Threshold<TYPES> for UpgradeThreshold {
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        membership.upgrade_threshold(epoch).into()
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        membership.upgrade_threshold().await.into()
     }
 }
 
@@ -189,33 +178,25 @@ impl<TYPES: NodeType> Certificate<TYPES, DaData> for DaCertificate<TYPES> {
             .context(|e| warn!("Signature check failed: {}", e))
     }
     /// Proxies to `Membership.da_stake`
-    fn stake_table_entry<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
+    async fn stake_table_entry(
+        membership: &EpochMembership<TYPES>,
         pub_key: &TYPES::SignatureKey,
-        epoch: Option<TYPES::Epoch>,
     ) -> Option<PeerConfig<TYPES::SignatureKey>> {
-        membership.da_stake(pub_key, epoch)
+        membership.da_stake(pub_key).await
     }
 
     /// Proxies to `Membership.da_stake_table`
-    fn stake_table<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
+    async fn stake_table(
+        membership: &EpochMembership<TYPES>,
     ) -> Vec<PeerConfig<TYPES::SignatureKey>> {
-        membership.da_stake_table(epoch)
+        membership.da_stake_table().await
     }
 
     /// Proxies to `Membership.da_total_nodes`
-    fn total_nodes<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> usize {
-        membership.da_total_nodes(epoch)
+    async fn total_nodes(membership: &EpochMembership<TYPES>) -> usize {
+        membership.da_total_nodes().await
     }
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        membership.da_success_threshold(epoch).into()
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        membership.da_success_threshold().await.into()
     }
     fn data(&self) -> &Self::Voteable {
         &self.data
@@ -278,33 +259,25 @@ impl<TYPES: NodeType> Certificate<TYPES, DaData2<TYPES>> for DaCertificate2<TYPES> {
-    fn stake_table_entry<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
+    async fn stake_table_entry(
+        membership: &EpochMembership<TYPES>,
         pub_key: &TYPES::SignatureKey,
-        epoch: Option<TYPES::Epoch>,
     ) -> Option<PeerConfig<TYPES::SignatureKey>> {
-        membership.da_stake(pub_key, epoch)
+        membership.da_stake(pub_key).await
     }
 
     /// Proxies to `Membership.da_stake_table`
-    fn stake_table<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
+    async fn stake_table(
+        membership: &EpochMembership<TYPES>,
    ) -> Vec<PeerConfig<TYPES::SignatureKey>> {
-        membership.da_stake_table(epoch)
+        membership.da_stake_table().await
     }
 
     /// Proxies to `Membership.da_total_nodes`
-    fn total_nodes<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> usize {
-        membership.da_total_nodes(epoch)
+    async fn total_nodes(membership: &EpochMembership<TYPES>) -> usize {
+        membership.da_total_nodes().await
     }
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        membership.da_success_threshold(epoch).into()
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        membership.da_success_threshold().await.into()
     }
     fn data(&self) -> &Self::Voteable {
         &self.data
@@ -369,34 +342,26 @@ impl<
             .wrap()
             .context(|e| warn!("Signature check failed: {}", e))
     }
-    fn threshold<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> u64 {
-        THRESHOLD::threshold(membership, epoch)
+    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
+        THRESHOLD::threshold(membership).await
     }
-    fn stake_table_entry<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
+    async fn stake_table_entry(
+        membership: &EpochMembership<TYPES>,
         pub_key: &TYPES::SignatureKey,
-        epoch: Option<TYPES::Epoch>,
     ) -> Option<PeerConfig<TYPES::SignatureKey>> {
-        membership.stake(pub_key, epoch)
+        membership.stake(pub_key).await
     }
-    fn stake_table<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
+    async fn stake_table(
+        membership: &EpochMembership<TYPES>,
     ) -> Vec<PeerConfig<TYPES::SignatureKey>> {
-        membership.stake_table(epoch)
+        membership.stake_table().await
     }
 
     /// Proxies to `Membership.total_nodes`
-    fn total_nodes<MEMBERSHIP: Membership<TYPES>>(
-        membership: &MEMBERSHIP,
-        epoch: Option<TYPES::Epoch>,
-    ) -> usize {
-        membership.total_nodes(epoch)
+    async fn total_nodes(membership: &EpochMembership<TYPES>) -> usize {
+        membership.total_nodes().await
     }
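The same de-sugaring applies to any custom `Threshold`: implementors keep writing `async fn` bodies, which satisfy the trait's `impl Future<Output = u64> + Send` return type. A hypothetical example (this threshold type is not part of the patch):

```rust
use hotshot_types::{
    epoch_membership::EpochMembership, simple_certificate::Threshold,
    traits::node_implementation::NodeType,
};

/// Hypothetical threshold: plain two-thirds-plus-one of the committee.
pub struct TwoThirdsThreshold {}

impl<TYPES: NodeType> Threshold<TYPES> for TwoThirdsThreshold {
    // `async fn` in the impl satisfies the trait's desugared
    // `-> impl Future<Output = u64> + Send`, exactly as the impls above do.
    async fn threshold(membership: &EpochMembership<TYPES>) -> u64 {
        let nodes = membership.total_nodes().await as u64;
        (nodes * 2) / 3 + 1
    }
}
```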
 
     fn data(&self) -> &Self::Voteable {
@@ -467,15 +432,14 @@ impl<TYPES: NodeType> UpgradeCertificate<TYPES> {
     /// Returns an error when the upgrade certificate is invalid.
     pub async fn validate<V: Versions>(
         upgrade_certificate: &Option<Self>,
-        membership: &RwLock<TYPES::Membership>,
+        membership: &EpochMembership<TYPES>,
         epoch: Option<TYPES::Epoch>,
         upgrade_lock: &UpgradeLock<TYPES, V>,
     ) -> Result<()> {
+        ensure!(epoch == membership.epoch(), "Epochs don't match!");
         if let Some(ref cert) = upgrade_certificate {
-            let membership_reader = membership.read().await;
-            let membership_stake_table = membership_reader.stake_table(epoch);
-            let membership_upgrade_threshold = membership_reader.upgrade_threshold(epoch);
-            drop(membership_reader);
+            let membership_stake_table = membership.stake_table().await;
+            let membership_upgrade_threshold = membership.upgrade_threshold().await;
 
             cert.is_valid_cert(
                 StakeTableEntries::<TYPES>::from(membership_stake_table).0,
diff --git a/hotshot-types/src/traits/election.rs b/hotshot-types/src/traits/election.rs
index b57065f435..b0f162f170 100644
--- a/hotshot-types/src/traits/election.rs
+++ b/hotshot-types/src/traits/election.rs
@@ -7,13 +7,11 @@
 //! The election trait, used to decide which node is the leader and determine if a vote is valid.
 
 use std::{collections::BTreeSet, fmt::Debug, num::NonZeroU64};
 
-use async_trait::async_trait;
 use hotshot_utils::anytrace::Result;
 
 use super::node_implementation::NodeType;
 use crate::{drb::DrbResult, PeerConfig};
 
-#[async_trait]
 /// A protocol for determining membership in and participating in a committee.
 pub trait Membership<TYPES: NodeType>: Debug + Send + Sync {
     /// The error type returned by methods like `lookup_leader`.
@@ -125,18 +123,28 @@ pub trait Membership<TYPES: NodeType>: Debug + Send + Sync {
     /// Returns the threshold required to upgrade the network protocol
     fn upgrade_threshold(&self, epoch: Option<TYPES::Epoch>) -> NonZeroU64;
 
+    /// Returns whether the stake table is available for the given epoch
+    fn has_epoch(&self, epoch: TYPES::Epoch) -> bool;
+
+    /// Gets the validated block header and epoch number of the epoch root
+    /// at the given block height
+    fn get_epoch_root(
+        &self,
+        block_height: u64,
+    ) -> impl std::future::Future<Output = Option<(TYPES::Epoch, TYPES::BlockHeader)>> + Send;
+
     #[allow(clippy::type_complexity)]
     /// Handles notifications that a new epoch root has been created.
     /// Is called under a read lock to the Membership. Return a callback
     /// with Some to have that callback invoked under a write lock.
     ///
     /// #3967 REVIEW NOTE: this is only called if epoch is Some. Is there any reason to do otherwise?
-    async fn add_epoch_root(
+    fn add_epoch_root(
         &self,
         _epoch: TYPES::Epoch,
         _block_header: TYPES::BlockHeader,
-    ) -> Option<Box<dyn FnOnce(&mut Self) + Send>> {
-        None
+    ) -> impl std::future::Future<Output = Option<Box<dyn FnOnce(&mut Self) + Send>>> + Send {
+        async { None }
     }
 
     /// Called to notify the Membership when a new DRB result has been calculated.
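Downstream `Membership` implementations gain two required hooks plus the de-sugared `add_epoch_root`. The static committees later in this diff simply return `true`/`None`; an epoch-aware implementation stores per-epoch tables and commits them through the callback. A self-contained toy mirroring that contract (the type and its fields are invented for illustration):

```rust
use std::collections::HashMap;

/// Toy stand-in for an epoch-aware membership: stake tables keyed by epoch.
struct EpochTables {
    tables: HashMap<u64, Vec<String>>, // epoch -> member ids
}

impl EpochTables {
    /// Mirrors `Membership::has_epoch`: a table is usable once stored.
    fn has_epoch(&self, epoch: u64) -> bool {
        self.tables.contains_key(&epoch)
    }

    /// Mirrors `Membership::add_epoch_root`: computed under the coordinator's
    /// read lock, and the returned callback is later run under the write lock.
    async fn add_epoch_root(
        &self,
        epoch: u64,
        members_from_header: Vec<String>,
    ) -> Option<Box<dyn FnOnce(&mut Self) + Send>> {
        if self.has_epoch(epoch) {
            return None; // nothing to commit
        }
        Some(Box::new(move |this: &mut Self| {
            this.tables.insert(epoch, members_from_header);
        }))
    }
}
```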
diff --git a/hotshot-types/src/traits/network.rs b/hotshot-types/src/traits/network.rs index 4d85ee82dc..fc9086106f 100644 --- a/hotshot-types/src/traits/network.rs +++ b/hotshot-types/src/traits/network.rs @@ -17,7 +17,6 @@ use std::{ time::Duration, }; -use async_lock::RwLock; use async_trait::async_trait; use dyn_clone::DynClone; use futures::{future::join_all, Future}; @@ -30,7 +29,10 @@ use thiserror::Error; use tokio::{sync::mpsc::error::TrySendError, time::sleep}; use super::{node_implementation::NodeType, signature_key::SignatureKey}; -use crate::{data::ViewNumber, message::SequencingMessage, BoxSyncFuture}; +use crate::{ + data::ViewNumber, epoch_membership::EpochMembershipCoordinator, message::SequencingMessage, + BoxSyncFuture, +}; /// Centralized server specific errors #[derive(Debug, Error, Serialize, Deserialize)] @@ -263,7 +265,7 @@ pub trait ConnectedNetwork: Clone + Send + Sync + 'st &'a self, _view: u64, _epoch: Option, - _membership: Arc>, + _membership_coordinator: EpochMembershipCoordinator, ) where TYPES: NodeType + 'a, { diff --git a/hotshot-types/src/utils.rs b/hotshot-types/src/utils.rs index 5f4031e77b..28b9b72aea 100644 --- a/hotshot-types/src/utils.rs +++ b/hotshot-types/src/utils.rs @@ -6,12 +6,6 @@ //! Utility functions, type aliases, helper structs and enum definitions. -use std::{ - hash::{Hash, Hasher}, - ops::Deref, - sync::Arc, -}; - use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use bincode::{ config::{ @@ -24,6 +18,11 @@ use committable::Commitment; use digest::OutputSizeUser; use serde::{Deserialize, Serialize}; use sha2::Digest; +use std::{ + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; use tagged_base64::tagged; use typenum::Unsigned; use vbs::version::StaticVersionType; diff --git a/hotshot-types/src/vote.rs b/hotshot-types/src/vote.rs index 789615ab80..e33e66f57f 100644 --- a/hotshot-types/src/vote.rs +++ b/hotshot-types/src/vote.rs @@ -8,12 +8,11 @@ use std::{ collections::{BTreeMap, HashMap}, + future::Future, marker::PhantomData, num::NonZeroU64, - sync::Arc, }; -use async_lock::RwLock; use bitvec::{bitvec, vec::BitVec}; use committable::{Commitment, Committable}; use hotshot_utils::anytrace::*; @@ -21,11 +20,11 @@ use primitive_types::U256; use tracing::error; use crate::{ + epoch_membership::EpochMembership, message::UpgradeLock, simple_certificate::Threshold, simple_vote::{VersionedVoteData, Voteable}, traits::{ - election::Membership, node_implementation::{NodeType, Versions}, signature_key::{SignatureKey, StakeTableEntryType}, }, @@ -83,29 +82,21 @@ pub trait Certificate: HasViewNumber { ) -> impl std::future::Future>; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> u64; + fn threshold(membership: &EpochMembership) -> impl Future + Send; /// Get Stake Table from Membership implementation. - fn stake_table>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> Vec>; + fn stake_table( + membership: &EpochMembership, + ) -> impl Future>> + Send; /// Get Total Nodes from Membership implementation. - fn total_nodes>( - membership: &MEMBERSHIP, - epoch: Option, - ) -> usize; + fn total_nodes(membership: &EpochMembership) -> impl Future + Send; /// Get `StakeTableEntry` from Membership implementation. 
- fn stake_table_entry>( - membership: &MEMBERSHIP, + fn stake_table_entry( + membership: &EpochMembership, pub_key: &TYPES::SignatureKey, - epoch: Option, - ) -> Option>; + ) -> impl Future>> + Send; /// Get the commitment which was voted on fn data(&self) -> &Self::Voteable; @@ -164,8 +155,7 @@ impl< pub async fn accumulate( &mut self, vote: &VOTE, - membership: &Arc>, - epoch: Option, + membership: EpochMembership, ) -> Option { let key = vote.signing_key(); @@ -188,12 +178,10 @@ impl< return None; } - let membership_reader = membership.read().await; - let stake_table_entry = CERT::stake_table_entry(&*membership_reader, &key, epoch)?; - let stake_table = CERT::stake_table(&*membership_reader, epoch); - let total_nodes = CERT::total_nodes(&*membership_reader, epoch); - let threshold = CERT::threshold(&*membership_reader, epoch); - drop(membership_reader); + let stake_table_entry = CERT::stake_table_entry(&membership, &key).await?; + let stake_table = CERT::stake_table(&membership).await; + let total_nodes = CERT::total_nodes(&membership).await; + let threshold = CERT::threshold(&membership).await; let vote_node_id = stake_table .iter() diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index 032c82edc5..e1ffbae935 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -15,8 +15,12 @@ use committable::Committable; use futures::future::{select, Either}; use hotshot_types::{ drb::{DrbResult, INITIAL_DRB_RESULT}, + epoch_membership::EpochMembershipCoordinator, message::UpgradeLock, - traits::{block_contents::BlockHeader, network::BroadcastDelay, node_implementation::Versions}, + traits::{ + block_contents::BlockHeader, election::Membership, network::BroadcastDelay, + node_implementation::Versions, + }, }; use rand::Rng; use url::Url; @@ -60,7 +64,6 @@ use hotshot_types::{ simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, traits::{ consensus_api::ConsensusApi, - election::Membership, network::ConnectedNetwork, node_implementation::{ConsensusTime, NodeType}, signature_key::SignatureKey, @@ -112,7 +115,7 @@ pub struct SystemContext, V: Versi pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// the metrics that the implementor is using. 
metrics: Arc, @@ -167,7 +170,7 @@ impl, V: Versions> Clone private_key: self.private_key.clone(), config: self.config.clone(), network: Arc::clone(&self.network), - memberships: Arc::clone(&self.memberships), + membership_coordinator: self.membership_coordinator.clone(), metrics: Arc::clone(&self.metrics), consensus: self.consensus.clone(), instance_state: Arc::clone(&self.instance_state), @@ -203,7 +206,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -251,7 +254,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -295,7 +298,11 @@ impl, V: Versions> SystemContext, V: Versions> SystemContext, V: Versions> SystemContext m, + Err(e) => return Err(HotShotError::InvalidState(e.message)), + }; + spawn(async move { - let memberships_da_committee_members = api - .memberships - .read() + let memberships_da_committee_members = membership + .da_committee_members(view_number) .await - .da_committee_members(view_number, epoch) .iter() .cloned() .collect(); @@ -617,7 +627,7 @@ impl, V: Versions> SystemContext::PrivateKey, node_id: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -675,7 +685,7 @@ impl, V: Versions> SystemContext::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -796,7 +806,7 @@ where private_key.clone(), nonce, config.clone(), - Arc::clone(&memberships), + memberships.clone(), Arc::clone(&network), initializer.clone(), metrics.clone(), @@ -857,7 +867,7 @@ where hotshot: Arc::clone(&left_system_context), storage: Arc::clone(&left_system_context.storage), network: Arc::clone(&left_system_context.network), - memberships: Arc::clone(&left_system_context.memberships), + membership_coordinator: left_system_context.membership_coordinator.clone(), epoch_height, }; @@ -869,7 +879,7 @@ where hotshot: Arc::clone(&right_system_context), storage: Arc::clone(&right_system_context.storage), network: Arc::clone(&right_system_context.network), - memberships: Arc::clone(&right_system_context.memberships), + membership_coordinator: right_system_context.membership_coordinator.clone(), epoch_height, }; diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index e69ed4aaa2..544154a2cd 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -10,6 +10,7 @@ pub mod task_state; use std::{collections::BTreeMap, fmt::Debug, num::NonZeroUsize, sync::Arc, time::Duration}; +use crate::EpochMembershipCoordinator; use async_broadcast::{broadcast, RecvError}; use async_lock::RwLock; use async_trait::async_trait; @@ -82,7 +83,7 @@ pub fn add_response_task, V: Versi ) { let state = NetworkResponseState::::new( handle.hotshot.consensus(), - Arc::clone(&handle.memberships), + handle.membership_coordinator.clone(), handle.public_key().clone(), handle.private_key().clone(), handle.hotshot.id, @@ -190,13 +191,12 @@ pub fn add_network_event_task< >( handle: &mut SystemContextHandle, network: Arc, - membership: Arc>, ) { let network_state: NetworkEventTaskState<_, V, _, 
_> = NetworkEventTaskState { network, view: TYPES::View::genesis(), epoch: genesis_epoch_from_version::(), - membership, + membership_coordinator: handle.membership_coordinator.clone(), storage: Arc::clone(&handle.storage()), consensus: OuterConsensus::new(handle.consensus()), upgrade_lock: handle.hotshot.upgrade_lock.clone(), @@ -322,7 +322,7 @@ where private_key: ::PrivateKey, nonce: u64, config: HotShotConfig, - memberships: Arc>, + memberships: EpochMembershipCoordinator, network: Arc, initializer: HotShotInitializer, metrics: ConsensusMetricsValue, @@ -330,12 +330,13 @@ where marketplace_config: MarketplaceConfig, ) -> SystemContextHandle { let epoch_height = config.epoch_height; + let hotshot = SystemContext::new( public_key, private_key, nonce, config, - memberships, + memberships.clone(), network, initializer, metrics, @@ -357,7 +358,7 @@ where hotshot: Arc::clone(&hotshot), storage: Arc::clone(&hotshot.storage), network: Arc::clone(&hotshot.network), - memberships: Arc::clone(&hotshot.memberships), + membership_coordinator: memberships.clone(), epoch_height, }; @@ -517,9 +518,8 @@ where /// Adds the `NetworkEventTaskState` tasks possibly modifying them as well. fn add_network_event_tasks(&self, handle: &mut SystemContextHandle) { let network = Arc::clone(&handle.network); - let memberships = Arc::clone(&handle.memberships); - self.add_network_event_task(handle, network, memberships); + self.add_network_event_task(handle, network); } /// Adds a `NetworkEventTaskState` task. Can be reimplemented to modify its behaviour. @@ -527,9 +527,8 @@ where &self, handle: &mut SystemContextHandle, channel: Arc<>::Network>, - membership: Arc>, ) { - add_network_event_task(handle, channel, membership); + add_network_event_task(handle, channel); } } @@ -562,9 +561,5 @@ pub async fn add_network_message_and_request_receiver_tasks< pub fn add_network_event_tasks, V: Versions>( handle: &mut SystemContextHandle, ) { - add_network_event_task( - handle, - Arc::clone(&handle.network), - Arc::clone(&handle.memberships), - ); + add_network_event_task(handle, Arc::clone(&handle.network)); } diff --git a/hotshot/src/tasks/task_state.rs b/hotshot/src/tasks/task_state.rs index d574ac91c6..b793d08930 100644 --- a/hotshot/src/tasks/task_state.rs +++ b/hotshot/src/tasks/task_state.rs @@ -52,7 +52,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), view: handle.cur_view().await, delay: handle.hotshot.config.data_request_delay, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), id: handle.hotshot.id, @@ -73,7 +73,7 @@ impl, V: Versions> CreateTaskState output_event_stream: handle.hotshot.external_event_stream.0.clone(), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), vote_collectors: BTreeMap::default(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -123,7 +123,7 @@ impl, V: Versions> CreateTaskState cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: 
handle.private_key().clone(), id: handle.hotshot.id, @@ -141,7 +141,7 @@ impl, V: Versions> CreateTaskState Self { consensus: OuterConsensus::new(handle.hotshot.consensus()), output_event_stream: handle.hotshot.external_event_stream.0.clone(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), network: Arc::clone(&handle.hotshot.network), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, @@ -166,7 +166,7 @@ impl, V: Versions> CreateTaskState cur_view, next_view: cur_view, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), num_timeouts_tracked: 0, @@ -193,7 +193,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(handle.hotshot.consensus()), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), @@ -238,7 +238,7 @@ impl, V: Versions> CreateTaskState latest_voted_view: handle.cur_view().await, vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership: handle.hotshot.membership_coordinator.clone(), drb_computation: None, output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, @@ -265,7 +265,7 @@ impl, V: Versions> CreateTaskState proposal_dependencies: BTreeMap::new(), consensus: OuterConsensus::new(consensus), instance_state: handle.hotshot.instance_state(), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), storage: Arc::clone(&handle.storage), @@ -292,7 +292,7 @@ impl, V: Versions> CreateTaskState consensus: OuterConsensus::new(consensus), cur_view: handle.cur_view().await, cur_epoch: handle.cur_epoch().await, - membership: Arc::clone(&handle.hotshot.memberships), + membership: handle.hotshot.membership_coordinator.clone(), timeout: handle.hotshot.config.next_view_timeout, output_event_stream: handle.hotshot.external_event_stream.0.clone(), storage: Arc::clone(&handle.storage), @@ -316,7 +316,7 @@ impl, V: Versions> CreateTaskState private_key: handle.private_key().clone(), instance_state: handle.hotshot.instance_state(), network: Arc::clone(&handle.hotshot.network), - membership: Arc::clone(&handle.hotshot.memberships), + membership_coordinator: handle.hotshot.membership_coordinator.clone(), vote_collectors: BTreeMap::default(), next_epoch_vote_collectors: BTreeMap::default(), timeout_vote_collectors: BTreeMap::default(), diff --git a/hotshot/src/traits/election/randomized_committee.rs b/hotshot/src/traits/election/randomized_committee.rs index cd3838fa05..b420723642 100644 --- a/hotshot/src/traits/election/randomized_committee.rs +++ b/hotshot/src/traits/election/randomized_committee.rs @@ -217,11 +217,6 @@ impl Membership for Committee { .is_some_and(|x| x.stake_table_entry.stake() > U256::zero()) } - // /// Get the network topic for the committee - // fn committee_topic(&self) -> Topic { - // 
self.committee_topic.clone() - // } - /// Index the vector of public keys with the current view number fn lookup_leader( &self, @@ -265,6 +260,16 @@ impl Membership for Committee { .unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/randomized_committee_members.rs b/hotshot/src/traits/election/randomized_committee_members.rs index 7a66573096..f1f386be0d 100644 --- a/hotshot/src/traits/election/randomized_committee_members.rs +++ b/hotshot/src/traits/election/randomized_committee_members.rs @@ -446,6 +446,16 @@ impl Membership let len = self.total_nodes(epoch); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/static_committee.rs b/hotshot/src/traits/election/static_committee.rs index 603fc45d44..35bd896f17 100644 --- a/hotshot/src/traits/election/static_committee.rs +++ b/hotshot/src/traits/election/static_committee.rs @@ -270,6 +270,16 @@ impl Membership for StaticCommittee { let len = self.stake_table.len(); NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/static_committee_leader_two_views.rs b/hotshot/src/traits/election/static_committee_leader_two_views.rs index 64b79112e8..ededbe507a 100644 --- a/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -239,6 +239,17 @@ impl Membership for StaticCommitteeLeaderForTwoViews::Epoch>) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } + fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} fn set_first_epoch(&mut self, _epoch: TYPES::Epoch, _initial_drb_result: DrbResult) {} diff --git a/hotshot/src/traits/election/two_static_committees.rs b/hotshot/src/traits/election/two_static_committees.rs index c13200c75e..7e80335a1b 100644 --- a/hotshot/src/traits/election/two_static_committees.rs +++ b/hotshot/src/traits/election/two_static_committees.rs @@ -427,6 +427,16 @@ impl Membership for TwoStaticCommittees { .unwrap() } } + fn has_epoch(&self, _epoch: TYPES::Epoch) -> bool { + true + } + + async fn get_epoch_root( + &self, + _block_height: u64, + ) -> Option<(TYPES::Epoch, TYPES::BlockHeader)> { + None + } fn add_drb_result(&mut self, _epoch: ::Epoch, _drb_result: DrbResult) {} diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 
438f790d68..87b4a873c2 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -32,6 +32,7 @@ use hotshot_types::{ COMBINED_NETWORK_MIN_PRIMARY_FAILURES, COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL, }, data::ViewNumber, + epoch_membership::EpochMembershipCoordinator, traits::{ network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, @@ -471,7 +472,7 @@ impl ConnectedNetwork for CombinedNetworks &'a self, view: u64, epoch: Option, - membership: Arc>, + membership: EpochMembershipCoordinator, ) where T: NodeType + 'a, { diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 24ae0d9138..93e9c5ef21 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -7,21 +7,7 @@ //! Libp2p based/production networking implementation //! This module provides a libp2p based networking implementation where each node in the //! network forms a tcp or udp connection to a subset of other nodes in the network -#[cfg(feature = "hotshot-testing")] -use std::str::FromStr; -use std::{ - cmp::min, - collections::{BTreeSet, HashSet}, - fmt::Debug, - net::{IpAddr, ToSocketAddrs}, - num::NonZeroUsize, - sync::{ - atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; - +use crate::EpochMembershipCoordinator; use anyhow::{anyhow, Context}; use async_lock::RwLock; use async_trait::async_trait; @@ -52,7 +38,6 @@ use hotshot_types::{ data::ViewNumber, network::NetworkConfig, traits::{ - election::Membership, metrics::{Counter, Gauge, Metrics, NoMetrics}, network::{ConnectedNetwork, NetworkError, Topic}, node_implementation::{ConsensusTime, NodeType}, @@ -66,6 +51,20 @@ use libp2p_identity::{ }; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use serde::Serialize; +#[cfg(feature = "hotshot-testing")] +use std::str::FromStr; +use std::{ + cmp::min, + collections::{BTreeSet, HashSet}, + fmt::Debug, + net::{IpAddr, ToSocketAddrs}, + num::NonZeroUsize, + sync::{ + atomic::{AtomicBool, AtomicU64, Ordering}, + Arc, + }, + time::Duration, +}; use tokio::{ select, spawn, sync::{ @@ -992,14 +991,20 @@ impl ConnectedNetwork for Libp2pNetwork { &'a self, view: u64, epoch: Option, - membership: Arc>, + membership_coordinator: EpochMembershipCoordinator, ) where TYPES: NodeType + 'a, { let future_view = ::View::new(view) + LOOK_AHEAD; let epoch = epoch.map(::Epoch::new); - let future_leader = match membership.read().await.leader(future_view, epoch) { + let membership = match membership_coordinator.membership_for_epoch(epoch).await { + Ok(m) => m, + Err(e) => { + return tracing::warn!(e.message); + } + }; + let future_leader = match membership.leader(future_view).await { Ok(l) => l, Err(e) => { return tracing::info!( diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index f8c82dfad6..07e6bab901 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -6,8 +6,6 @@ //! 
Provides an event-streaming handle for a [`SystemContext`] running in the background -use std::sync::Arc; - use anyhow::{anyhow, Context, Ok, Result}; use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; @@ -21,17 +19,20 @@ use hotshot_task_impls::{events::HotShotEvent, helpers::broadcast_event}; use hotshot_types::{ consensus::Consensus, data::{Leaf2, QuorumProposalWrapper}, + epoch_membership::EpochMembershipCoordinator, error::HotShotError, message::{Message, MessageKind, Proposal, RecipientList}, request_response::ProposalRequestPayload, traits::{ + block_contents::BlockHeader, consensus_api::ConsensusApi, - election::Membership, network::{BroadcastDelay, ConnectedNetwork, Topic}, node_implementation::NodeType, signature_key::SignatureKey, }, + utils::option_epoch_from_block_number, }; +use std::sync::Arc; use tracing::instrument; use crate::{traits::NodeImplementation, types::Event, SystemContext, Versions}; @@ -68,7 +69,7 @@ pub struct SystemContextHandle, V: pub network: Arc, /// Memberships used by consensus - pub memberships: Arc>, + pub membership_coordinator: EpochMembershipCoordinator, /// Number of blocks in an epoch, zero means there are no epochs pub epoch_height: u64, @@ -155,7 +156,7 @@ impl + 'static, V: Versions> signed_proposal_request.commit().as_ref(), )?; - let mem = Arc::clone(&self.memberships); + let membership_coordinator = self.membership_coordinator.clone(); let receiver = self.internal_event_stream.1.activate_cloned(); let sender = self.internal_event_stream.0.clone(); let epoch_height = self.epoch_height; @@ -185,14 +186,26 @@ impl + 'static, V: Versions> // Then, if it's `Some`, make sure that the data is correct if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { - // Make sure that the quorum_proposal is valid - let mem_reader = mem.read().await; - if let Err(err) = quorum_proposal.validate_signature(&mem_reader, epoch_height) + let maybe_epoch = option_epoch_from_block_number::( + quorum_proposal.data.proposal.epoch.is_some(), + quorum_proposal.data.block_header().block_number(), + epoch_height, + ); + let membership = match membership_coordinator + .membership_for_epoch(maybe_epoch) + .await { + Result::Ok(m) => m, + Err(e) => { + tracing::warn!(e.message); + continue; + } + }; + // Make sure that the quorum_proposal is valid + if let Err(err) = quorum_proposal.validate_signature(&membership).await { tracing::warn!("Invalid Proposal Received after Request. Err {:?}", err); continue; } - drop(mem_reader); let proposed_leaf = Leaf2::from_quorum_proposal(&quorum_proposal.data); let commit = proposed_leaf.commit(); if commit == leaf_commitment { @@ -327,10 +340,11 @@ impl + 'static, V: Versions> epoch_number: Option, ) -> Result { self.hotshot - .memberships - .read() + .membership_coordinator + .membership_for_epoch(epoch_number) + .await? 
+ .leader(view_number) .await - .leader(view_number, epoch_number) .context("Failed to lookup leader") } diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index b975a9d8fe..96be4a2167 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -5167,6 +5167,7 @@ dependencies = [ "ark-serialize 0.4.2", "ark-srs", "ark-std 0.4.0", + "async-broadcast", "async-lock 3.4.0", "async-trait", "bincode", diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index a2eaf1ea35..559f692070 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -18,7 +18,6 @@ use hotshot_events_service::events_source::{ EventFilterSet, EventsSource, EventsStreamer, StartupInfo, }; use hotshot_query_service::data_source::ExtensibleDataSource; -use hotshot_types::traits::election::Membership; use hotshot_types::{ data::ViewNumber, event::Event, @@ -185,14 +184,18 @@ impl, V: Versions, P: SequencerPersistence> &self, epoch: Option<::Epoch>, ) -> Vec::SignatureKey>> { - self.consensus() + let Ok(mem) = self + .consensus() .await .read() .await - .memberships - .read() + .membership_coordinator + .membership_for_epoch(epoch) .await - .stake_table(epoch) + else { + return vec![]; + }; + mem.stake_table().await } /// Get the stake table for the current epoch if not provided diff --git a/sequencer/src/context.rs b/sequencer/src/context.rs index 9b69947f73..25c8fec67b 100644 --- a/sequencer/src/context.rs +++ b/sequencer/src/context.rs @@ -18,6 +18,7 @@ use hotshot_orchestrator::client::OrchestratorClient; use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf2, ViewNumber}, + epoch_membership::EpochMembershipCoordinator, network::NetworkConfig, traits::{ metrics::Metrics, @@ -144,14 +145,16 @@ impl, P: SequencerPersistence, V: Versions> Sequence ))); let persistence = Arc::new(persistence); - let memberships = Arc::new(async_lock::RwLock::new(membership)); + let coordinator = + EpochMembershipCoordinator::new(Arc::new(RwLock::new(membership)), config.epoch_height); + let membership = coordinator.membership().clone(); let handle = SystemContext::init( validator_config.public_key, validator_config.private_key.clone(), instance_state.node_id, config.clone(), - memberships.clone(), + coordinator, network.clone(), initializer, ConsensusMetricsValue::new(metrics), @@ -186,7 +189,9 @@ impl, P: SequencerPersistence, V: Versions> Sequence request_response_config, RequestResponseSender::new(outbound_message_sender), request_response_receiver, - RecipientSource { memberships }, + RecipientSource { + memberships: membership, + }, DataSource {}, ); diff --git a/sequencer/src/message_compat_tests.rs b/sequencer/src/message_compat_tests.rs index 157e2df2d1..08438f08c2 100755 --- a/sequencer/src/message_compat_tests.rs +++ b/sequencer/src/message_compat_tests.rs @@ -48,6 +48,7 @@ async fn test_message_compat(_ver: Ver) { use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ data::vid_disperse::{ADVZDisperse, ADVZDisperseShare}, + epoch_membership::EpochMembershipCoordinator, simple_certificate::{ TimeoutCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, @@ -62,12 +63,15 @@ async fn test_message_compat(_ver: Ver) { let (sender, priv_key) = PubKey::generated_from_seed_indexed(Default::default(), 0); let signature = PubKey::sign(&priv_key, &[]).unwrap(); let committee = vec![PeerConfig::default()]; /* one committee member, necessary to generate a VID share */ - let membership = 
Arc::new(RwLock::new(EpochCommittees::new_stake(
-        committee.clone(),
-        committee,
-        &NodeState::default(),
+    let membership = EpochMembershipCoordinator::new(
+        Arc::new(RwLock::new(EpochCommittees::new_stake(
+            committee.clone(),
+            committee,
+            &NodeState::default(),
+            10,
+        ))),
         10,
-    )));
+    );
     let upgrade_data = UpgradeProposalData {
         old_version: Version { major: 0, minor: 1 },
         new_version: Version { major: 1, minor: 0 },
diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs
index 52b015a1b1..89d5ef2d25 100644
--- a/types/src/v0/impls/stake_table.rs
+++ b/types/src/v0/impls/stake_table.rs
@@ -4,7 +4,6 @@ use std::{
     num::NonZeroU64,
 };
 
-use async_trait::async_trait;
 use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::StakersUpdated;
 use ethers::types::{Address, U256};
 use ethers_conv::ToAlloy;
@@ -298,7 +297,6 @@ impl EpochCommittees {
 #[error("Could not lookup leader")] // TODO error variants? message?
 pub struct LeaderLookupError;
 
-#[async_trait]
 impl Membership<SeqTypes> for EpochCommittees {
     type Error = LeaderLookupError;
     // DO NOT USE. Dummy constructor to comply w/ trait.
@@ -468,6 +466,7 @@ impl Membership<SeqTypes> for EpochCommittees {
             .unwrap()
     }
 
+    #[allow(refining_impl_trait)]
     async fn add_epoch_root(
         &self,
         epoch: Epoch,
@@ -485,6 +484,14 @@ impl Membership<SeqTypes> for EpochCommittees {
         })
     }
 
+    fn has_epoch(&self, epoch: Epoch) -> bool {
+        self.state.contains_key(&epoch)
+    }
+
+    async fn get_epoch_root(&self, _block_height: u64) -> Option<(Epoch, Header)> {
+        None
+    }
+
     fn add_drb_result(&mut self, epoch: Epoch, drb: DrbResult) {
         let Some(raw_stake_table) = self.state.get(&epoch) else {
             tracing::error!("add_drb_result({}, {:?}) was called, but we do not yet have the stake table for epoch {}", epoch, drb, epoch);