From 7ac8ec96da5920487a47b22733ae4ff44542336d Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 14 Jan 2025 16:08:42 +0500 Subject: [PATCH 001/120] consensus storage migration --- Cargo.lock | 2 +- Cargo.toml | 2 +- .../postgres/V501__epoch_tables.sql | 54 ++ .../migrations/sqlite/V301__epoch_tables.sql | 54 ++ sequencer/src/api.rs | 2 +- sequencer/src/persistence.rs | 55 +- sequencer/src/persistence/fs.rs | 50 +- sequencer/src/persistence/no_storage.rs | 51 +- sequencer/src/persistence/sql.rs | 595 ++++++++++++++++-- sequencer/src/proposal_fetcher.rs | 2 +- types/src/v0/traits.rs | 67 +- 11 files changed, 814 insertions(+), 120 deletions(-) create mode 100644 sequencer/api/migrations/postgres/V501__epoch_tables.sql create mode 100644 sequencer/api/migrations/sqlite/V301__epoch_tables.sql diff --git a/Cargo.lock b/Cargo.lock index a1f5e28679..3a9df9aa6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11795,4 +11795,4 @@ checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", -] +] \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index fe96f875f8..355ea195f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -173,4 +173,4 @@ hotshot-task-impls = { git = "https://www.github.com/EspressoSystems/HotShot.git hotshot-testing = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } hotshot-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } libp2p-networking = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } -hotshot-example-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } +hotshot-example-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } \ No newline at end of file diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql new file mode 100644 index 0000000000..3e45917e10 --- /dev/null +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -0,0 +1,54 @@ +CREATE TABLE anchor_leaf2 ( + view BIGINT PRIMARY KEY, + leaf2 BYTEA, + qc2 BYTEA +); + + +CREATE TABLE da_proposal2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + +CREATE TABLE vid_share2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + + +CREATE TABLE undecided_state2 ( + -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or + -- update that there is only a single entry in this table: the latest known state. 
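+    -- For illustration only (not part of the schema): the Rust layer maintains this
+    -- invariant by always writing through an upsert keyed on the fixed id, roughly
+    --   INSERT INTO undecided_state2 (id, leaves2, state) VALUES (0, $1, $2)
+    --   ON CONFLICT (id) DO UPDATE SET leaves2 = excluded.leaves2, state = excluded.state;
+    -- (hypothetical statement; the actual call goes through the generic upsert
+    -- helper in sequencer/src/persistence/sql.rs).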
+ id INT PRIMARY KEY, + + leaves2 BYTEA NOT NULL, + state BYTEA NOT NULL +); + + +CREATE TABLE quorum_proposals2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR, + data BYTEA +); + +CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals (leaf_hash); +CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal (payload_hash); +CREATE INDEX vid_share2_payload_hash_idx ON vid_share (payload_hash); + +CREATE TABLE quorum_certificate2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BYTEA NOT NULL +); + +CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate (leaf_hash); + +CREATE TABLE epoch_migration ( + table_name TEXT PRIMARY KEY, + completed bool DEFAULT FALSE +); + +INSERT INTO epoch_migrations("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql new file mode 100644 index 0000000000..e98c1465e7 --- /dev/null +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -0,0 +1,54 @@ +CREATE TABLE anchor_leaf2 ( + view BIGINT PRIMARY KEY, + leaf2 BYTEA, + qc2 BYTEA +); + + +CREATE TABLE da_proposal2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + +CREATE TABLE vid_share2 ( + view BIGINT PRIMARY KEY, + payload_hash VARCHAR, + data BYTEA +); + + +CREATE TABLE undecided_state2 ( + -- The ID is always set to 0. Setting it explicitly allows us to enforce with every insert or + -- update that there is only a single entry in this table: the latest known state. + id INT PRIMARY KEY, + + leaves2 BYTEA NOT NULL, + state BYTEA NOT NULL +); + + +CREATE TABLE quorum_proposals2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR, + data BYTEA, +); + +CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals (leaf_hash); +CREATE INDEX da_proposal2_payload_hash_idx ON da_proposal (payload_hash); +CREATE INDEX vid_share2_payload_hash_idx ON vid_share (payload_hash); + +CREATE TABLE quorum_certificate2 ( + view BIGINT PRIMARY KEY, + leaf_hash VARCHAR NOT NULL, + data BYTEA NOT NULL +); + +CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate (leaf_hash); + +CREATE TABLE epoch_migration ( + table_name TEXT PRIMARY KEY, + completed bool DEFAULT FALSE +); + +INSERT INTO epoch_migrations("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index dee017a6fa..5acf0c8854 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1288,7 +1288,7 @@ mod api_tests { PubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum_proposal"); persistence - .append_quorum_proposal(&Proposal { + .append_quorum_proposal2(&Proposal { data: quorum_proposal.clone(), signature: quorum_proposal_signature, _pd: Default::default(), diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 725cc74fa4..8f4aefe3f6 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -55,7 +55,10 @@ mod persistence_tests { use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::{DaProposal, EpochNumber, QuorumProposal2, VidDisperseShare, ViewNumber}, + data::{ + DaProposal, DaProposal2, 
EpochNumber, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, ViewNumber, + }, event::{EventType, HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate, UpgradeCertificate}, @@ -165,9 +168,7 @@ mod persistence_tests { None ); - let leaf: Leaf2 = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()) - .await - .into(); + let leaf: Leaf2 = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); let disperse = vid_scheme(2) @@ -175,12 +176,15 @@ mod persistence_tests { .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); let signature = PubKey::sign(&privkey, &[]).unwrap(); - let mut vid = VidDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment: Default::default(), share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, + epoch: EpochNumber::new(0), + target_epoch: EpochNumber::new(0), + data_epoch_payload_commitment: None, }; let mut quorum_proposal = Proposal { data: QuorumProposal2:: { @@ -203,7 +207,7 @@ mod persistence_tests { let vid_share0 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share0).await.unwrap(); + storage.append_vid2(&vid_share0).await.unwrap(); assert_eq!( storage.load_vid_share(ViewNumber::new(0)).await.unwrap(), @@ -213,7 +217,7 @@ mod persistence_tests { vid.view_number = ViewNumber::new(1); let vid_share1 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share1).await.unwrap(); + storage.append_vid2(&vid_share1).await.unwrap(); assert_eq!( storage.load_vid_share(vid.view_number).await.unwrap(), @@ -223,7 +227,7 @@ mod persistence_tests { vid.view_number = ViewNumber::new(2); let vid_share2 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share2).await.unwrap(); + storage.append_vid2(&vid_share2).await.unwrap(); assert_eq!( storage.load_vid_share(vid.view_number).await.unwrap(), @@ -233,7 +237,7 @@ mod persistence_tests { vid.view_number = ViewNumber::new(3); let vid_share3 = vid.clone().to_proposal(&privkey).unwrap().clone(); - storage.append_vid(&vid_share3).await.unwrap(); + storage.append_vid2(&vid_share3).await.unwrap(); assert_eq!( storage.load_vid_share(vid.view_number).await.unwrap(), @@ -243,10 +247,11 @@ mod persistence_tests { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: EpochNumber::new(0), }; let da_proposal = Proposal { @@ -258,7 +263,7 @@ mod persistence_tests { let vid_commitment = vid_commitment(&leaf_payload_bytes_arc, 2); storage - .append_da(&da_proposal, vid_commitment) + .append_da2(&da_proposal, vid_commitment) .await .unwrap(); @@ -270,7 +275,7 @@ mod persistence_tests { let mut da_proposal1 = da_proposal.clone(); da_proposal1.data.view_number = ViewNumber::new(1); storage - .append_da(&da_proposal1.clone(), vid_commitment) + .append_da2(&da_proposal1.clone(), vid_commitment) .await .unwrap(); @@ -285,7 +290,7 @@ mod persistence_tests { let mut da_proposal2 = da_proposal1.clone(); da_proposal2.data.view_number = ViewNumber::new(2); storage - 
.append_da(&da_proposal2.clone(), vid_commitment) + .append_da2(&da_proposal2.clone(), vid_commitment) .await .unwrap(); @@ -300,7 +305,7 @@ mod persistence_tests { let mut da_proposal3 = da_proposal2.clone(); da_proposal3.data.view_number = ViewNumber::new(3); storage - .append_da(&da_proposal3.clone(), vid_commitment) + .append_da2(&da_proposal3.clone(), vid_commitment) .await .unwrap(); @@ -314,7 +319,7 @@ mod persistence_tests { let quorum_proposal1 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); @@ -326,7 +331,7 @@ mod persistence_tests { quorum_proposal.data.view_number = ViewNumber::new(1); let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); @@ -342,7 +347,7 @@ mod persistence_tests { quorum_proposal.data.justify_qc.view_number = ViewNumber::new(1); let quorum_proposal3 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal3) + .append_quorum_proposal2(&quorum_proposal3) .await .unwrap(); @@ -361,7 +366,7 @@ mod persistence_tests { // This one should stick around after GC runs. let quorum_proposal4 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal4) + .append_quorum_proposal2(&quorum_proposal4) .await .unwrap(); @@ -832,12 +837,15 @@ mod persistence_tests { .unwrap(); let payload_commitment = disperse.commit; let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = VidDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, + epoch: EpochNumber::new(0), + target_epoch: EpochNumber::new(0), + data_epoch_payload_commitment: None, } .to_proposal(&privkey) .unwrap() @@ -869,22 +877,23 @@ mod persistence_tests { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: EpochNumber::new(0), }, signature: block_payload_signature, _pd: Default::default(), }; storage - .append_da(&da_proposal, payload_commitment) + .append_da2(&da_proposal, payload_commitment) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); + storage.append_vid2(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index b31bf4ba32..161eba67f4 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -8,7 +8,10 @@ use espresso_types::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, + }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -381,7 +384,7 @@ impl Inner { fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let dir_path = self.da_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ 
-392,7 +395,7 @@ impl Inner { let da_bytes = fs::read(file_path)?; - let da_proposal: Proposal> = + let da_proposal: Proposal> = bincode::deserialize(&da_bytes)?; Ok(Some(da_proposal)) } @@ -400,7 +403,7 @@ impl Inner { fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let dir_path = self.vid_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -410,7 +413,7 @@ impl Inner { } let vid_share_bytes = fs::read(file_path)?; - let vid_share: Proposal> = + let vid_share: Proposal> = bincode::deserialize(&vid_share_bytes)?; Ok(Some(vid_share)) } @@ -596,14 +599,14 @@ impl SequencerPersistence for Persistence { async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_da_proposal(view) } async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_vid_share(view) } @@ -713,7 +716,7 @@ impl SequencerPersistence for Persistence { }, ) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { @@ -882,14 +885,33 @@ impl SequencerPersistence for Persistence { )) } - async fn migrate_consensus( + async fn append_vid2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_da2( &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 Ok(()) } } diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 697fa8bbda..13c47bcf52 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -9,7 +9,10 @@ use espresso_types::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, + }, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -97,14 +100,14 @@ impl SequencerPersistence for NoStorage { async fn load_da_proposal( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } async fn load_vid_share( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } @@ -148,7 +151,7 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result<()> { Ok(()) } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, _proposal: &Proposal>, ) -> anyhow::Result<()> { @@ -161,16 +164,6 @@ impl SequencerPersistence for NoStorage { Ok(()) } - async fn migrate_consensus( - &self, - _: fn(Leaf) -> Leaf2, - _: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - Ok(()) - } - async fn store_next_epoch_quorum_certificate( &self, _high_qc: NextEpochQuorumCertificate2, @@ -183,4 +176,34 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result>> { Ok(None) } + + async fn append_vid2( + &self, + 
proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + Ok(()) + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + Ok(()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 8cc54e63a5..7f3e6685f8 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -20,7 +20,7 @@ use hotshot_query_service::{ Transaction, TransactionMode, Write, }, }, - Transaction as _, VersionedDataSource, + SqlDataSource, Transaction as _, VersionedDataSource, }, fetching::{ request::{LeafRequest, PayloadRequest, VidCommonRequest}, @@ -29,9 +29,13 @@ use hotshot_query_service::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, + }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, + qc, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, @@ -40,10 +44,11 @@ use hotshot_types::{ node_implementation::ConsensusTime, }, utils::View, - vid::{VidCommitment, VidCommon}, + vid::{VidCommitment, VidCommon, VidSchemeType}, vote::HasViewNumber, }; use itertools::Itertools; +use jf_vid::VidScheme; use sqlx::Row; use sqlx::{query, Executor}; use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; @@ -878,11 +883,11 @@ impl Persistence { } const PRUNE_TABLES: &[&str] = &[ - "anchor_leaf", - "vid_share", - "da_proposal", - "quorum_proposals", - "quorum_certificate", + "anchor_leaf2", + "vid_share2", + "da_proposal2", + "quorum_proposals2", + "quorum_certificate2", ]; async fn prune_to_view(tx: &mut Transaction, view: u64) -> anyhow::Result<()> { @@ -959,14 +964,12 @@ impl SequencerPersistence for Persistence { // because we already store it separately, as part of the DA proposal. Storing it // here contributes to load on the DB for no reason, so we remove it before // serializing the leaf. - let mut leaf = downgrade_leaf(info.leaf.clone()); + let mut leaf = info.leaf.clone(); leaf.unfill_block_payload(); - let qc = qc2.to_qc(); - - let view = qc.view_number.u64() as i64; + let view = qc2.view_number.u64() as i64; let leaf_bytes = bincode::serialize(&leaf)?; - let qc_bytes = bincode::serialize(&qc)?; + let qc_bytes = bincode::serialize(&qc2)?; Ok((view, leaf_bytes, qc_bytes)) }) .collect::>>()?; @@ -975,7 +978,7 @@ impl SequencerPersistence for Persistence { // event consumer later fails, there is no need to abort the storage of the leaves. let mut tx = self.db.write().await?; - tx.upsert("anchor_leaf", ["view", "leaf", "qc"], ["view"], values) + tx.upsert("anchor_leaf2", ["view", "leaf2", "qc2"], ["view"], values) .await?; tx.commit().await?; @@ -1018,19 +1021,17 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_optional("SELECT leaf, qc FROM anchor_leaf ORDER BY view DESC LIMIT 1") + .fetch_optional("SELECT leaf2, qc2 FROM anchor_leaf2 ORDER BY view DESC LIMIT 1") .await? 
else { return Ok(None); }; - let leaf_bytes: Vec = row.get("leaf"); - let leaf: Leaf = bincode::deserialize(&leaf_bytes)?; - let leaf2: Leaf2 = leaf.into(); + let leaf_bytes: Vec = row.get("leaf2"); + let leaf2: Leaf2 = bincode::deserialize(&leaf_bytes)?; - let qc_bytes: Vec = row.get("qc"); - let qc: QuorumCertificate = bincode::deserialize(&qc_bytes)?; - let qc2 = qc.to_qc2(); + let qc_bytes: Vec = row.get("qc2"); + let qc2: QuorumCertificate2 = bincode::deserialize(&qc_bytes)?; Ok(Some((leaf2, qc2))) } @@ -1050,15 +1051,14 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .fetch_optional("SELECT leaves, state FROM undecided_state2 WHERE id = 0") .await? else { return Ok(None); }; let leaves_bytes: Vec = row.get("leaves"); - let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; - let leaves2 = upgrade_commitment_map(leaves); + let leaves2: CommitmentMap = bincode::deserialize(&leaves_bytes)?; let state_bytes: Vec = row.get("state"); let state = bincode::deserialize(&state_bytes)?; @@ -1069,7 +1069,7 @@ impl SequencerPersistence for Persistence { async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() @@ -1090,7 +1090,7 @@ impl SequencerPersistence for Persistence { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let result = self .db .read() @@ -1115,7 +1115,7 @@ impl SequencerPersistence for Persistence { .db .read() .await? - .fetch_all("SELECT * FROM quorum_proposals") + .fetch_all("SELECT * FROM quorum_proposals2") .await?; Ok(BTreeMap::from_iter( @@ -1124,9 +1124,9 @@ impl SequencerPersistence for Persistence { let view: i64 = row.get("view"); let view_number: ViewNumber = ViewNumber::new(view.try_into()?); let bytes: Vec = row.get("data"); - let proposal: Proposal> = + let proposal: Proposal> = bincode::deserialize(&bytes)?; - Ok((view_number, convert_proposal(proposal))) + Ok((view_number, proposal)) }) .collect::>>()?, )) @@ -1138,12 +1138,12 @@ impl SequencerPersistence for Persistence { ) -> anyhow::Result>> { let mut tx = self.db.read().await?; let (data,) = - query_as::<(Vec,)>("SELECT data FROM quorum_proposals WHERE view = $1 LIMIT 1") + query_as::<(Vec,)>("SELECT data FROM quorum_proposals2 WHERE view = $1 LIMIT 1") .bind(view.u64() as i64) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - let proposal = convert_proposal(proposal); + let proposal: Proposal> = bincode::deserialize(&data)?; + Ok(proposal) } @@ -1206,8 +1206,6 @@ impl SequencerPersistence for Persistence { leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - if !self.store_undecided_state { return Ok(()); } @@ -1217,7 +1215,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( - "undecided_state", + "undecided_state2", ["id", "leaves", "state"], ["id"], [(0_i32, leaves_bytes, state_bytes)], @@ -1225,18 +1223,16 @@ impl SequencerPersistence for Persistence { .await?; tx.commit().await } - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let view_number = proposal.data.view_number().u64(); let proposal_bytes = bincode::serialize(&proposal).context("serializing proposal")?; - let 
leaf_hash = Committable::commit(&Leaf::from_quorum_proposal(&proposal.data)); + let leaf_hash = Committable::commit(&Leaf2::from_quorum_proposal(&proposal.data)); let mut tx = self.db.write().await?; tx.upsert( - "quorum_proposals", + "quorum_proposals2", ["view", "leaf_hash", "data"], ["view"], [(view_number as i64, leaf_hash.to_string(), proposal_bytes)], @@ -1247,7 +1243,7 @@ impl SequencerPersistence for Persistence { let justify_qc = &proposal.data.justify_qc; let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?; tx.upsert( - "quorum_certificate", + "quorum_certificate2", ["view", "leaf_hash", "data"], ["view"], [( @@ -1300,14 +1296,432 @@ impl SequencerPersistence for Persistence { tx.commit().await } - async fn migrate_consensus( - &self, - _migrate_leaf: fn(Leaf) -> Leaf2, - _migrate_proposal: fn( - Proposal>, - ) -> Proposal>, - ) -> anyhow::Result<()> { - // TODO: https://github.com/EspressoSystems/espresso-sequencer/issues/2357 + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let batch_size: i64 = 1000; + let mut offset: i64 = 0; + + loop { + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = anchor_leaf", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("anchor leaf migration already done"); + + return Ok(()); + } + + let rows = query("SELECT leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf = row.try_get("leaf")?; + let qc = row.try_get("qc")?; + let leaf1: Leaf = serde_json::from_value(leaf)?; + let qc1: QuorumCertificate = serde_json::from_value(qc)?; + let view: i64 = row.try_get("view")?; + + let leaf2: Leaf2 = leaf1.into(); + let qc2: QuorumCertificate2 = qc1.to_qc2(); + + let leaf2_bytes = bincode::serialize(&leaf2)?; + let qc2_bytes = bincode::serialize(&qc2)?; + + values.push((view, leaf2_bytes, qc2_bytes)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO anchor_leaf2 (view, leaf2, qc2) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf, qc)| { + b.push_bind(view).push_bind(leaf).push_bind(qc); + }); + + offset += batch_size; + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + tx.commit().await?; + + if rows.len() < batch_size as usize { + break; + } + } + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("anchor_leaf".to_string(), true)], + ) + .await?; + tx.commit().await?; + + Ok(()) + } + + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 1000; + let mut offset: i64 = 0; + + loop { + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = da_proposal", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("da proposals migration already done"); + + return Ok(()); + } + + let rows = query( + "SELECT payload_hash, data FROM da_proposal ORDER BY view LIMIT $1 OFFSET $2", + ) + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + let mut values = Vec::new(); + + for row in rows.iter() { + let data = row.try_get("data")?; + let payload_hash: 
String = row.try_get("payload_hash")?; + + let da_proposal: DaProposal = serde_json::from_value(data)?; + let da_proposal2: DaProposal2 = da_proposal.into(); + + let view = da_proposal2.view_number.u64() as i64; + let data = bincode::serialize(&da_proposal2)?; + + values.push((view, payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO da_proposal2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + offset += batch_size; + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + if rows.len() < batch_size as usize { + break; + } + } + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("da_proposal".to_string(), true)], + ) + .await?; + tx.commit().await?; + + Ok(()) + } + + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let batch_size: i64 = 1000; + let mut offset: i64 = 0; + + loop { + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = vid_share", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("vid_share migration already done"); + + return Ok(()); + } + + let rows = + query("SELECT payload_hash, data FROM vid_share ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + let mut values = Vec::new(); + + for row in rows.iter() { + let data = row.try_get("data")?; + let payload_hash: String = row.try_get("payload_hash")?; + + let vid_share: VidDisperseShare = serde_json::from_value(data)?; + let vid_share2: VidDisperseShare2 = vid_share.into(); + + let view = vid_share2.view_number().u64() as i64; + let data = bincode::serialize(&vid_share2)?; + + values.push((view, payload_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO vid_share2 (view, payload_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, payload_hash, data)| { + b.push_bind(view).push_bind(payload_hash).push_bind(data); + }); + + offset += batch_size; + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + if rows.len() < batch_size as usize { + break; + } + } + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("vid_share".to_string(), true)], + ) + .await?; + tx.commit().await?; + + Ok(()) + } + + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut tx = self.db.read().await?; + + let row = tx + .fetch_optional("SELECT leaves, state FROM undecided_state WHERE id = 0") + .await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = undecided_state", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("undecided state migration already done"); + + return Ok(()); + } + + if let Some(row) = row { + let leaves_bytes: Vec = row.try_get("leaves")?; + let leaves: CommitmentMap = bincode::deserialize(&leaves_bytes)?; + + let leaves2 = upgrade_commitment_map(leaves); + let leaves2_bytes = bincode::serialize(&leaves2)?; + let state_bytes: Vec = row.try_get("state")?; + + 
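+            // Note: the upgraded map is written back under the same fixed singleton
+            // id (0) used by the legacy `undecided_state` table, so a re-run of this
+            // migration overwrites the row rather than duplicating it.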
let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves2_bytes, state_bytes)], + ) + .await?; + }; + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("undecided_state".to_string(), true)], + ) + .await?; + tx.commit().await?; + + Ok(()) + } + + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let batch_size: i64 = 1000; + let mut offset: i64 = 0; + + loop { + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = quorum_proposals", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!("quorum proposals migration already done"); + + return Ok(()); + } + + let rows = + query("SELECT view, leaf_hash, data FROM quorum_proposals2 ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data = row.try_get("data")?; + + let quorum_proposal: QuorumProposal = serde_json::from_value(data)?; + let quorum_proposal2: QuorumProposal2 = quorum_proposal.into(); + + let view = quorum_proposal2.view_number().u64() as i64; + let data = bincode::serialize(&quorum_proposal2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_proposals2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + offset += batch_size; + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + if rows.len() < batch_size as usize { + break; + } + } + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("quorum_proposals".to_string(), true)], + ) + .await?; + tx.commit().await?; + + Ok(()) + } + + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + let batch_size: i64 = 1000; + let mut offset: i64 = 0; + + loop { + let mut tx = self.db.read().await?; + + let (is_completed,) = query_as::<(bool,)>( + "SELECT is_completed from epoch_migration WHERE table_name = quorum_certificates", + ) + .fetch_one(tx.as_mut()) + .await?; + + if is_completed { + tracing::info!(" quorum certificates migration already done"); + + return Ok(()); + } + + let rows = + query("SELECT view, leaf_hash, data FROM quorum_certificate ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; + + drop(tx); + + let mut values = Vec::new(); + + for row in rows.iter() { + let leaf_hash: String = row.try_get("leaf_hash")?; + let data = row.try_get("data")?; + + let qc: QuorumCertificate = serde_json::from_value(data)?; + let qc2: QuorumCertificate2 = qc.to_qc2(); + + let view = qc2.view_number().u64() as i64; + let data = bincode::serialize(&qc2)?; + + values.push((view, leaf_hash, data)); + } + + let mut query_builder: sqlx::QueryBuilder = + sqlx::QueryBuilder::new("INSERT INTO quorum_certificate2 (view, leaf_hash, data) "); + + query_builder.push_values(values.into_iter(), |mut b, (view, leaf_hash, data)| { + b.push_bind(view).push_bind(leaf_hash).push_bind(data); + }); + + offset += 
batch_size; + + let query = query_builder.build(); + + let mut tx = self.db.write().await?; + query.execute(tx.as_mut()).await?; + + if rows.len() < batch_size as usize { + break; + } + } + + let mut tx = self.db.write().await?; + tx.upsert( + "epoch_migration", + ["table_name", "is_completed"], + ["table_name"], + [("quorum_certificates".to_string(), true)], + ) + .await?; + tx.commit().await?; + Ok(()) } @@ -1344,6 +1758,68 @@ impl SequencerPersistence for Persistence { }) .transpose() } + + async fn append_vid2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + let view = proposal.data.view_number.u64(); + let payload_hash = proposal.data.payload_commitment; + let data_bytes = bincode::serialize(proposal).unwrap(); + + let mut tx = self.db.write().await?; + tx.upsert( + "vid_share2", + ["view", "data", "payload_hash"], + ["view"], + [(view as i64, data_bytes, payload_hash.to_string())], + ) + .await?; + tx.commit().await + } + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> anyhow::Result<()> { + let data = &proposal.data; + let view = data.view_number().u64(); + let data_bytes = bincode::serialize(proposal).unwrap(); + + let mut tx = self.db.write().await?; + tx.upsert( + "da_proposal2", + ["view", "data", "payload_hash"], + ["view"], + [(view as i64, data_bytes, vid_commit.to_string())], + ) + .await?; + tx.commit().await + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + if !self.store_undecided_state { + return Ok(()); + } + + let leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; + let state_bytes = bincode::serialize(&state).context("serializing state")?; + + let mut tx = self.db.write().await?; + tx.upsert( + "undecided_state2", + ["id", "leaves", "state"], + ["id"], + [(0_i32, leaves_bytes, state_bytes)], + ) + .await?; + tx.commit().await + } } #[async_trait] @@ -1544,6 +2020,7 @@ mod test { use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ + data::EpochNumber, simple_certificate::QuorumCertificate, traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, vid::vid_scheme, @@ -1700,13 +2177,13 @@ mod test { .unwrap(); storage.append_vid(&vid_share).await.unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); // Add an extra quorum proposal so we have a QC pointing back at `leaf`. storage - .append_quorum_proposal(&next_quorum_proposal) + .append_quorum_proposal2(&next_quorum_proposal) .await .unwrap(); @@ -1756,7 +2233,7 @@ mod test { let data_view = ViewNumber::new(1); // Populate some data. 
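+        // The data populated below exercises the epoch-aware v2 types introduced in
+        // this patch (`Leaf2`, `VidDisperseShare2` with `epoch`/`target_epoch`, and
+        // `DaProposal2`), so it is stored via the new `*2` tables.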
- let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); @@ -1765,12 +2242,15 @@ mod test { .unwrap(); let payload_commitment = vid_commitment(&leaf_payload_bytes_arc, 2); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid = VidDisperseShare:: { + let vid = VidDisperseShare2:: { view_number: data_view, payload_commitment, share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, + epoch: EpochNumber::new(0), + target_epoch: EpochNumber::new(0), + data_epoch_payload_commitment: None, } .to_proposal(&privkey) .unwrap() @@ -1801,23 +2281,24 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: data_view, + epoch: EpochNumber::new(0), }, signature: block_payload_signature, _pd: Default::default(), }; tracing::info!(?vid, ?da_proposal, ?quorum_proposal, "append data"); - storage.append_vid(&vid).await.unwrap(); + storage.append_vid2(&vid).await.unwrap(); storage - .append_da(&da_proposal, payload_commitment) + .append_da2(&da_proposal, payload_commitment) .await .unwrap(); storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index 48b7ba6a2d..1d1cbefe74 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -196,7 +196,7 @@ where .context("timed out fetching proposal")? 
.context("error fetching proposal")?; self.persistence - .append_quorum_proposal(&proposal) + .append_quorum_proposal2(&proposal) .await .context("error saving fetched proposal")?; diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index cb42bc0812..32a9f32b6b 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -10,7 +10,8 @@ use hotshot::{types::EventType, HotShotInitializer}; use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, EpochNumber, QuorumProposal, QuorumProposal2, VidDisperseShare, ViewNumber, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, VidDisperseShare, + VidDisperseShare2, ViewNumber, }, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -461,11 +462,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> anyhow::Result>>>; async fn load_da_proposal( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> anyhow::Result>>>; async fn load_upgrade_certificate( &self, ) -> anyhow::Result>>; @@ -676,7 +677,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()>; - async fn append_quorum_proposal( + async fn append_quorum_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()>; @@ -686,11 +687,37 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { ) -> anyhow::Result<()>; async fn migrate_consensus( &self, - migrate_leaf: fn(Leaf) -> Leaf2, - migrate_proposal: fn( + _migrate_leaf: fn(Leaf) -> Leaf2, + _migrate_proposal: fn( Proposal>, ) -> Proposal>, - ) -> anyhow::Result<()>; + ) -> anyhow::Result<()> { + self.migrate_anchor_leaf().await?; + self.migrate_da_proposals().await?; + self.migrate_vid_shares().await?; + self.migrate_undecided_state().await?; + self.migrate_quorum_proposals().await?; + self.migrate_quorum_certificates().await + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + Ok(()) + } async fn load_anchor_view(&self) -> anyhow::Result { match self.load_anchor_leaf().await? { @@ -707,6 +734,30 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_next_epoch_quorum_certificate( &self, ) -> anyhow::Result>>; + + async fn append_vid2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()>; + + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> anyhow::Result<()>; + + async fn append_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()>; } #[async_trait] @@ -783,7 +834,7 @@ impl Storage for Arc
<P>
{ proposal: &Proposal>, ) -> anyhow::Result<()> { (**self) - .append_quorum_proposal(&convert_proposal(proposal.clone())) + .append_quorum_proposal2(&convert_proposal(proposal.clone())) .await } From d58bb1d34153102dd25371d2f4ab618c2ccde988 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 14 Jan 2025 17:35:38 +0500 Subject: [PATCH 002/120] fix migrations and commit transaction after batch insert --- sequencer/api/migrations/postgres/V501__epoch_tables.sql | 2 +- sequencer/api/migrations/sqlite/V301__epoch_tables.sql | 4 ++-- sequencer/src/persistence/sql.rs | 8 +++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql index 3e45917e10..987803be36 100644 --- a/sequencer/api/migrations/postgres/V501__epoch_tables.sql +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -51,4 +51,4 @@ CREATE TABLE epoch_migration ( completed bool DEFAULT FALSE ); -INSERT INTO epoch_migrations("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file +INSERT INTO epoch_migrations ("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql index e98c1465e7..7698c68253 100644 --- a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -31,7 +31,7 @@ CREATE TABLE undecided_state2 ( CREATE TABLE quorum_proposals2 ( view BIGINT PRIMARY KEY, leaf_hash VARCHAR, - data BYTEA, + data BYTEA ); CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals (leaf_hash); @@ -51,4 +51,4 @@ CREATE TABLE epoch_migration ( completed bool DEFAULT FALSE ); -INSERT INTO epoch_migrations("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file +INSERT INTO epoch_migration ("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 7f3e6685f8..bab5651a6e 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1432,6 +1432,8 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; query.execute(tx.as_mut()).await?; + tx.commit().await?; + if rows.len() < batch_size as usize { break; } @@ -1506,7 +1508,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; query.execute(tx.as_mut()).await?; - + tx.commit().await?; if rows.len() < batch_size as usize { break; } @@ -1631,7 +1633,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; query.execute(tx.as_mut()).await?; - + tx.commit().await?; if rows.len() < batch_size as usize { break; } @@ -1706,7 +1708,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; query.execute(tx.as_mut()).await?; - + tx.commit().await?; if rows.len() < batch_size as usize { break; } From d1dd38e05b0416cc5495cf3abf2fd8ed240bedd5 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 15 Jan 2025 16:13:54 +0500 
Subject: [PATCH 003/120] sqlite migration fixes and fix deserialization errors --- .../migrations/sqlite/V301__epoch_tables.sql | 16 +- sequencer/src/persistence/sql.rs | 272 +++++++++++++++--- 2 files changed, 244 insertions(+), 44 deletions(-) diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql index 7698c68253..c9092690e7 100644 --- a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -1,20 +1,20 @@ CREATE TABLE anchor_leaf2 ( view BIGINT PRIMARY KEY, - leaf2 BYTEA, - qc2 BYTEA + leaf2 BLOB, + qc2 BLOB ); CREATE TABLE da_proposal2 ( view BIGINT PRIMARY KEY, payload_hash VARCHAR, - data BYTEA + data BLOB ); CREATE TABLE vid_share2 ( view BIGINT PRIMARY KEY, payload_hash VARCHAR, - data BYTEA + data BLOB ); @@ -23,15 +23,15 @@ CREATE TABLE undecided_state2 ( -- update that there is only a single entry in this table: the latest known state. id INT PRIMARY KEY, - leaves2 BYTEA NOT NULL, - state BYTEA NOT NULL + leaves BLOB NOT NULL, + state BLOB NOT NULL ); CREATE TABLE quorum_proposals2 ( view BIGINT PRIMARY KEY, leaf_hash VARCHAR, - data BYTEA + data BLOB ); CREATE UNIQUE INDEX quorum_proposals2_leaf_hash_idx ON quorum_proposals (leaf_hash); @@ -41,7 +41,7 @@ CREATE INDEX vid_share2_payload_hash_idx ON vid_share (payload_hash); CREATE TABLE quorum_certificate2 ( view BIGINT PRIMARY KEY, leaf_hash VARCHAR NOT NULL, - data BYTEA NOT NULL + data BLOB NOT NULL ); CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate (leaf_hash); diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index bab5651a6e..272f89044a 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1304,7 +1304,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.read().await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = anchor_leaf", + "SELECT completed from epoch_migration WHERE table_name = 'anchor_leaf'", ) .fetch_one(tx.as_mut()) .await?; @@ -1315,21 +1315,24 @@ impl SequencerPersistence for Persistence { return Ok(()); } - let rows = query("SELECT leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") - .bind(batch_size) - .bind(offset) - .fetch_all(tx.as_mut()) - .await?; + let rows = + query("SELECT view, leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") + .bind(batch_size) + .bind(offset) + .fetch_all(tx.as_mut()) + .await?; drop(tx); - + if rows.is_empty() { + break; + } let mut values = Vec::new(); for row in rows.iter() { - let leaf = row.try_get("leaf")?; - let qc = row.try_get("qc")?; - let leaf1: Leaf = serde_json::from_value(leaf)?; - let qc1: QuorumCertificate = serde_json::from_value(qc)?; + let leaf: Vec = row.try_get("leaf")?; + let qc: Vec = row.try_get("qc")?; + let leaf1: Leaf = bincode::deserialize(&leaf)?; + let qc1: QuorumCertificate = bincode::deserialize(&qc)?; let view: i64 = row.try_get("view")?; let leaf2: Leaf2 = leaf1.into(); @@ -1364,7 +1367,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("anchor_leaf".to_string(), true)], ) @@ -1382,7 +1385,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.read().await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = 
da_proposal", + "SELECT completed from epoch_migration WHERE table_name = 'da_proposal'", ) .fetch_one(tx.as_mut()) .await?; @@ -1402,14 +1405,16 @@ impl SequencerPersistence for Persistence { .await?; drop(tx); - + if rows.is_empty() { + break; + } let mut values = Vec::new(); for row in rows.iter() { - let data = row.try_get("data")?; + let data: Vec = row.try_get("data")?; let payload_hash: String = row.try_get("payload_hash")?; - let da_proposal: DaProposal = serde_json::from_value(data)?; + let da_proposal: DaProposal = bincode::deserialize(&data)?; let da_proposal2: DaProposal2 = da_proposal.into(); let view = da_proposal2.view_number.u64() as i64; @@ -1442,7 +1447,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("da_proposal".to_string(), true)], ) @@ -1460,7 +1465,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.read().await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = vid_share", + "SELECT completed from epoch_migration WHERE table_name = 'vid_share'", ) .fetch_one(tx.as_mut()) .await?; @@ -1479,14 +1484,16 @@ impl SequencerPersistence for Persistence { .await?; drop(tx); - + if rows.is_empty() { + break; + } let mut values = Vec::new(); for row in rows.iter() { - let data = row.try_get("data")?; + let data: Vec = row.try_get("data")?; let payload_hash: String = row.try_get("payload_hash")?; - let vid_share: VidDisperseShare = serde_json::from_value(data)?; + let vid_share: VidDisperseShare = bincode::deserialize(&data)?; let vid_share2: VidDisperseShare2 = vid_share.into(); let view = vid_share2.view_number().u64() as i64; @@ -1517,7 +1524,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("vid_share".to_string(), true)], ) @@ -1535,7 +1542,7 @@ impl SequencerPersistence for Persistence { .await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = undecided_state", + "SELECT completed from epoch_migration WHERE table_name = 'undecided_state'", ) .fetch_one(tx.as_mut()) .await?; @@ -1567,7 +1574,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("undecided_state".to_string(), true)], ) @@ -1585,7 +1592,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.read().await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = quorum_proposals", + "SELECT completed from epoch_migration WHERE table_name = 'quorum_proposals'", ) .fetch_one(tx.as_mut()) .await?; @@ -1597,7 +1604,7 @@ impl SequencerPersistence for Persistence { } let rows = - query("SELECT view, leaf_hash, data FROM quorum_proposals2 ORDER BY view LIMIT $1 OFFSET $2") + query("SELECT view, leaf_hash, data FROM quorum_proposals ORDER BY view LIMIT $1 OFFSET $2") .bind(batch_size) .bind(offset) .fetch_all(tx.as_mut()) @@ -1605,13 +1612,17 @@ impl SequencerPersistence for Persistence { drop(tx); + if rows.is_empty() { + break; + } + let mut values = Vec::new(); for row in rows.iter() { let leaf_hash: String = row.try_get("leaf_hash")?; - let data = row.try_get("data")?; + let data: 
Vec = row.try_get("data")?; - let quorum_proposal: QuorumProposal = serde_json::from_value(data)?; + let quorum_proposal: QuorumProposal = bincode::deserialize(&data)?; let quorum_proposal2: QuorumProposal2 = quorum_proposal.into(); let view = quorum_proposal2.view_number().u64() as i64; @@ -1642,7 +1653,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("quorum_proposals".to_string(), true)], ) @@ -1660,7 +1671,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.read().await?; let (is_completed,) = query_as::<(bool,)>( - "SELECT is_completed from epoch_migration WHERE table_name = quorum_certificates", + "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificates'", ) .fetch_one(tx.as_mut()) .await?; @@ -1679,14 +1690,16 @@ impl SequencerPersistence for Persistence { .await?; drop(tx); - + if rows.is_empty() { + break; + } let mut values = Vec::new(); for row in rows.iter() { let leaf_hash: String = row.try_get("leaf_hash")?; - let data = row.try_get("data")?; + let data: Vec = row.try_get("data")?; - let qc: QuorumCertificate = serde_json::from_value(data)?; + let qc: QuorumCertificate = bincode::deserialize(&data)?; let qc2: QuorumCertificate2 = qc.to_qc2(); let view = qc2.view_number().u64() as i64; @@ -1717,7 +1730,7 @@ impl SequencerPersistence for Persistence { let mut tx = self.db.write().await?; tx.upsert( "epoch_migration", - ["table_name", "is_completed"], + ["table_name", "completed"], ["table_name"], [("quorum_certificates".to_string(), true)], ) @@ -2016,15 +2029,24 @@ mod generic_tests { #[cfg(test)] mod test { + use std::marker::PhantomData; + use super::*; use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; - use espresso_types::{traits::NullEventConsumer, Leaf, NodeState, ValidatedState}; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; + use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ data::EpochNumber, simple_certificate::QuorumCertificate, - traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, + simple_vote::QuorumData, + traits::{ + block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, + signature_key::SignatureKey, + EncodeBytes, + }, vid::vid_scheme, }; use jf_vid::VidScheme; @@ -2363,4 +2385,182 @@ mod test { }) .await } + + #[tokio::test(flavor = "multi_thread")] + async fn test_consensus_migration() { + setup_test(); + + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + + let storage = opt.create().await.unwrap(); + + for i in 0..200 { + let (qp, leaf) = generate_leaf(i).await; + let qc = generate_qc(i).await; + let mut tx = storage.db.write().await.unwrap(); + + let qc_bytes = bincode::serialize(&qc).unwrap(); + let leaf_bytes = bincode::serialize(&leaf).unwrap(); + + tx.upsert( + "anchor_leaf", + ["view", "leaf", "qc"], + ["view"], + [(i as i64, leaf_bytes, qc_bytes)], + ) + .await + .unwrap(); + tx.commit().await.unwrap(); + + let genesis_payload = Leaf::genesis(&ValidatedState::default(), &NodeState::default()) + .await + .block_payload() + .unwrap(); + + let leaf_payload_bytes_arc = genesis_payload.encode(); + let disperse = vid_scheme(2) + 
.disperse(leaf_payload_bytes_arc.clone()) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let vid = VidDisperseShare:: { + view_number: ViewNumber::new(i), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(i), + }; + + let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) + .expect("Failed to sign block payload"); + + let da_proposal = Proposal { + data: da, + signature: block_payload_signature, + _pd: Default::default(), + }; + + storage + .append_vid(&vid.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + storage + .append_da(&da_proposal, disperse.commit) + .await + .unwrap(); + } + + let x = |v: Proposal>| { + let qc = v.data; + + let qc2 = qc.into(); + + Proposal { + data: qc2, + signature: v.signature, + _pd: PhantomData, + } + }; + + storage.migrate_consensus(Leaf2::from, x).await.unwrap(); + } + + async fn generate_qc(view: u64) -> QuorumCertificate { + let mut qc = QuorumCertificate::::genesis::( + &ValidatedState::default(), + &NodeState::default(), + ) + .await; + qc.view_number = ViewNumber::new(view); + + qc + } + + async fn generate_vid_share(view: u64) -> VidDisperseShare { + let genesis_payload = Leaf::genesis(&ValidatedState::default(), &NodeState::default()) + .await + .block_payload() + .unwrap(); + + let leaf_payload_bytes_arc = genesis_payload.encode(); + let disperse = vid_scheme(2) + .disperse(leaf_payload_bytes_arc.clone()) + .unwrap(); + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); + let vid = VidDisperseShare:: { + view_number: ViewNumber::new(view), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(view), + }; + + vid + } + + async fn generate_leaf(view: u64) -> (QuorumProposal, Leaf) { + let view = ViewNumber::new(view); + + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + let (payload, metadata) = Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc, + upgrade_certificate: None, + proposal_certificate: None, + }; + let leaf = Leaf::from_quorum_proposal(&quorum_proposal); + + (quorum_proposal, leaf) + } } From dbecff362be28cef1a23486e6947071669419ef4 Mon Sep 17 00:00:00 2001 
From: imabdulbasit Date: Wed, 15 Jan 2025 19:08:15 +0500 Subject: [PATCH 004/120] test --- sequencer/src/persistence/no_storage.rs | 19 +- sequencer/src/persistence/sql.rs | 244 +++++++++++++----------- 2 files changed, 145 insertions(+), 118 deletions(-) diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 13c47bcf52..d5feecfe65 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -5,14 +5,11 @@ use anyhow::bail; use async_trait::async_trait; use espresso_types::{ v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence}, - Leaf, Leaf2, NetworkConfig, + Leaf2, NetworkConfig, }; use hotshot_types::{ consensus::CommitmentMap, - data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, VidDisperseShare, - VidDisperseShare2, - }, + data::{DaProposal, DaProposal2, QuorumProposal2, VidDisperseShare, VidDisperseShare2}, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -179,30 +176,30 @@ impl SequencerPersistence for NoStorage { async fn append_vid2( &self, - proposal: &Proposal>, + _proposal: &Proposal>, ) -> anyhow::Result<()> { Ok(()) } async fn append_da2( &self, - proposal: &Proposal>, - vid_commit: ::Commit, + _proposal: &Proposal>, + _vid_commit: ::Commit, ) -> anyhow::Result<()> { Ok(()) } async fn append_proposal2( &self, - proposal: &Proposal>, + _proposal: &Proposal>, ) -> anyhow::Result<()> { Ok(()) } async fn update_undecided_state2( &self, - leaves: CommitmentMap, - state: BTreeMap>, + _leaves: CommitmentMap, + _state: BTreeMap>, ) -> anyhow::Result<()> { Ok(()) } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 272f89044a..65612c00ac 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -5,7 +5,7 @@ use committable::Committable; use derivative::Derivative; use derive_more::derive::{From, Into}; use espresso_types::{ - downgrade_commitment_map, downgrade_leaf, parse_duration, upgrade_commitment_map, + parse_duration, upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence, StateCatchup}, BackoffParams, Leaf, Leaf2, NetworkConfig, Payload, }; @@ -20,7 +20,7 @@ use hotshot_query_service::{ Transaction, TransactionMode, Write, }, }, - SqlDataSource, Transaction as _, VersionedDataSource, + Transaction as _, VersionedDataSource, }, fetching::{ request::{LeafRequest, PayloadRequest, VidCommonRequest}, @@ -34,8 +34,7 @@ use hotshot_types::{ VidDisperseShare2, }, event::{Event, EventType, HotShotAction, LeafInfo}, - message::{convert_proposal, Proposal}, - qc, + message::Proposal, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, @@ -2037,17 +2036,8 @@ mod test { use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; - use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ - data::EpochNumber, - simple_certificate::QuorumCertificate, - simple_vote::QuorumData, - traits::{ - block_contents::{vid_commitment, GENESIS_VID_NUM_STORAGE_NODES}, - signature_key::SignatureKey, - EncodeBytes, - }, - vid::vid_scheme, + data::EpochNumber, message::convert_proposal, simple_certificate::QuorumCertificate, simple_vote::QuorumData, 
traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, vid::vid_scheme }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; @@ -2395,12 +2385,70 @@ mod test { let storage = opt.create().await.unwrap(); - for i in 0..200 { - let (qp, leaf) = generate_leaf(i).await; - let qc = generate_qc(i).await; + let rows = 300; + + for i in 0..rows { + let view = ViewNumber::new(i); + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let (payload, metadata) = + Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment(&payload_bytes, 4); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + .expect("Failed to sign quorum proposal"); + + let proposal_bytes = bincode::serialize(&quorum_proposal) + .context("serializing proposal") + .unwrap(); + + let proposal = Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: PhantomData, + }; + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload(payload, 4).unwrap(); + let mut tx = storage.db.write().await.unwrap(); - let qc_bytes = bincode::serialize(&qc).unwrap(); + let qc_bytes = bincode::serialize(&justify_qc).unwrap(); let leaf_bytes = bincode::serialize(&leaf).unwrap(); tx.upsert( @@ -2413,16 +2461,8 @@ mod test { .unwrap(); tx.commit().await.unwrap(); - let genesis_payload = Leaf::genesis(&ValidatedState::default(), &NodeState::default()) - .await - .block_payload() - .unwrap(); + let disperse = vid_scheme(4).disperse(payload_bytes.clone()).unwrap(); - let leaf_payload_bytes_arc = genesis_payload.encode(); - let disperse = vid_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) - .unwrap(); - let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); let vid = VidDisperseShare:: { view_number: ViewNumber::new(i), payload_commitment: Default::default(), @@ -2442,8 +2482,8 @@ mod test { view_number: ViewNumber::new(i), }; - let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) - .expect("Failed to sign block payload"); + let block_payload_signature = + BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload"); let da_proposal = Proposal { data: da, @@ -2459,9 +2499,39 @@ mod test { .append_da(&da_proposal, disperse.commit) .await .unwrap(); + + let leaf_hash = Committable::commit(&leaf); + let mut tx = storage.db.write().await.expect("failed to start write tx"); + tx.upsert( + "quorum_proposals", + ["view", "leaf_hash", "data"], + ["view"], + [(i as i64, leaf_hash.to_string(), proposal_bytes)], + ) + .await + .expect("failed to upsert quorum proposal"); + + let justify_qc = 
&proposal.data.justify_qc; + let justify_qc_bytes = bincode::serialize(&justify_qc) + .context("serializing QC") + .unwrap(); + tx.upsert( + "quorum_certificate", + ["view", "leaf_hash", "data"], + ["view"], + [( + justify_qc.view_number.u64() as i64, + justify_qc.data.leaf_commit.to_string(), + &justify_qc_bytes, + )], + ) + .await + .expect("failed to upsert qc"); + + tx.commit().await.expect("failed to commit"); } - let x = |v: Proposal>| { + let qp_fn = |v: Proposal>| { let qc = v.data; let qc2 = qc.into(); @@ -2473,94 +2543,54 @@ mod test { } }; - storage.migrate_consensus(Leaf2::from, x).await.unwrap(); - } - - async fn generate_qc(view: u64) -> QuorumCertificate { - let mut qc = QuorumCertificate::::genesis::( - &ValidatedState::default(), - &NodeState::default(), - ) - .await; - qc.view_number = ViewNumber::new(view); - - qc - } + storage.migrate_consensus(Leaf2::from, qp_fn).await.unwrap(); - async fn generate_vid_share(view: u64) -> VidDisperseShare { - let genesis_payload = Leaf::genesis(&ValidatedState::default(), &NodeState::default()) + let mut tx = storage.db.read().await.unwrap(); + let (anchor_leaf2_count,) = query_as::<(i64,)>("SELECT COUNT(*) from anchor_leaf2") + .fetch_one(tx.as_mut()) .await - .block_payload() .unwrap(); + assert_eq!( + anchor_leaf2_count, rows as i64, + "anchor leaf count does not match rows", + ); - let leaf_payload_bytes_arc = genesis_payload.encode(); - let disperse = vid_scheme(2) - .disperse(leaf_payload_bytes_arc.clone()) + let (da_proposal_count,) = query_as::<(i64,)>("SELECT COUNT(*) from da_proposal2") + .fetch_one(tx.as_mut()) + .await .unwrap(); - let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid = VidDisperseShare:: { - view_number: ViewNumber::new(view), - payload_commitment: Default::default(), - share: disperse.shares[0].clone(), - common: disperse.common, - recipient_key: pubkey, - }; - - let (payload, metadata) = - Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) - .await - .unwrap(); - - let da = DaProposal:: { - encoded_transactions: payload.encode(), - metadata, - view_number: ViewNumber::new(view), - }; - - vid - } - - async fn generate_leaf(view: u64) -> (QuorumProposal, Leaf) { - let view = ViewNumber::new(view); + assert_eq!( + da_proposal_count, rows as i64, + "da proposal count does not match rows", + ); - let validated_state = ValidatedState::default(); - let instance_state = NodeState::default(); - let (payload, metadata) = Payload::from_transactions([], &validated_state, &instance_state) + let (vid_share_count,) = query_as::<(i64,)>("SELECT COUNT(*) from vid_share2") + .fetch_one(tx.as_mut()) .await .unwrap(); - let builder_commitment = payload.builder_commitment(&metadata); - let payload_bytes = payload.encode(); - - let payload_commitment = vid_commitment(&payload_bytes, GENESIS_VID_NUM_STORAGE_NODES); - - let block_header = Header::genesis( - &instance_state, - payload_commitment, - builder_commitment, - metadata, + assert_eq!( + vid_share_count, rows as i64, + "vid share count does not match rows" ); - let null_quorum_data = QuorumData { - leaf_commit: Commitment::::default_commitment_no_preimage(), - }; - - let justify_qc = QuorumCertificate::new( - null_quorum_data.clone(), - null_quorum_data.commit(), - view, - None, - PhantomData, + let (quorum_proposals_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_proposals2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_proposals_count, rows as i64, + "quorum proposals 
count does not match rows", ); - let quorum_proposal = QuorumProposal { - block_header, - view_number: view, - justify_qc, - upgrade_certificate: None, - proposal_certificate: None, - }; - let leaf = Leaf::from_quorum_proposal(&quorum_proposal); - - (quorum_proposal, leaf) + let (quorum_certificates_count,) = + query_as::<(i64,)>("SELECT COUNT(*) from quorum_certificate2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + assert_eq!( + quorum_certificates_count, rows as i64, + "quorum certificates count does not match rows", + ); } } From 27ab511d45f84de0a827e88357a3a0cc287bada4 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 20 Jan 2025 14:01:31 +0500 Subject: [PATCH 005/120] fs migration --- .../migrations/sqlite/V301__epoch_tables.sql | 2 +- sequencer/src/persistence/fs.rs | 654 ++++++++++++++++-- sequencer/src/persistence/no_storage.rs | 10 +- sequencer/src/persistence/sql.rs | 28 +- types/src/v0/traits.rs | 14 +- 5 files changed, 618 insertions(+), 90 deletions(-) diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql index c9092690e7..327326b82e 100644 --- a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -48,7 +48,7 @@ CREATE INDEX quorum_certificate2_leaf_hash_idx ON quorum_certificate (leaf_hash) CREATE TABLE epoch_migration ( table_name TEXT PRIMARY KEY, - completed bool DEFAULT FALSE + completed bool NOT NULL DEFAULT FALSE ); INSERT INTO epoch_migration ("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 161eba67f4..03ece51488 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -28,7 +28,7 @@ use hotshot_types::{ use jf_vid::VidScheme; use std::sync::Arc; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, fs::{self, File, OpenOptions}, io::{Read, Seek, SeekFrom, Write}, ops::RangeInclusive, @@ -37,7 +37,7 @@ use std::{ use crate::ViewNumber; -use espresso_types::{downgrade_commitment_map, downgrade_leaf, upgrade_commitment_map}; +use espresso_types::upgrade_commitment_map; /// Options for file system backed persistence. #[derive(Parser, Clone, Debug)] @@ -101,10 +101,24 @@ impl PersistenceOptions for Options { let store_undecided_state = self.store_undecided_state; let view_retention = self.consensus_view_retention; + let migration_path = path.join("migration"); + let migrated = if migration_path.is_file() { + let bytes = fs::read(&path).context(format!( + "unable to read leaf migration from {}", + path.display() + ))?; + let json = serde_json::from_slice(&bytes).context("config file is not valid JSON")?; + + serde_json::from_value(json).context("malformed config file")? 
+ } else { + HashSet::new() + }; + Ok(Persistence { store_undecided_state, inner: Arc::new(RwLock::new(Inner { path, + migrated, view_retention, })), }) @@ -130,6 +144,7 @@ pub struct Persistence { struct Inner { path: PathBuf, view_retention: u64, + migrated: HashSet, } impl Inner { @@ -137,6 +152,10 @@ impl Inner { self.path.join("hotshot.cfg") } + fn migration(&self) -> PathBuf { + self.path.join("migration") + } + fn voted_view_path(&self) -> PathBuf { self.path.join("highest_voted_view") } @@ -146,6 +165,10 @@ impl Inner { self.path.join("decided_leaves") } + fn decided_leaf2_path(&self) -> PathBuf { + self.path.join("decided_leaves2") + } + /// The path from previous versions where there was only a single file for anchor leaves. fn legacy_anchor_leaf_path(&self) -> PathBuf { self.path.join("anchor_leaf") @@ -155,18 +178,34 @@ impl Inner { self.path.join("vid") } + fn vid2_dir_path(&self) -> PathBuf { + self.path.join("vid2") + } + fn da_dir_path(&self) -> PathBuf { self.path.join("da") } + fn da2_dir_path(&self) -> PathBuf { + self.path.join("da2") + } + fn undecided_state_path(&self) -> PathBuf { self.path.join("undecided_state") } + fn undecided2_state_path(&self) -> PathBuf { + self.path.join("undecided_state2") + } + fn quorum_proposals_dir_path(&self) -> PathBuf { self.path.join("quorum_proposals") } + fn quorum_proposals2_dir_path(&self) -> PathBuf { + self.path.join("quorum_proposals2") + } + fn upgrade_certificate_dir_path(&self) -> PathBuf { self.path.join("upgrade_certificate") } @@ -175,6 +214,20 @@ impl Inner { self.path.join("next_epoch_quorum_certificate") } + fn update_migration(&mut self) -> anyhow::Result<()> { + let path = self.migration(); + let bytes = bincode::serialize(&self.migrated)?; + + self.replace( + &path, + |_| Ok(true), + |mut file| { + file.write_all(&bytes)?; + Ok(()) + }, + ) + } + /// Overwrite a file if a condition is met. /// /// The file at `path`, if it exists, is opened in read mode and passed to `pred`. If `pred` @@ -258,12 +311,12 @@ impl Inner { Ok(()) }; - delete_files(intervals, None, self.da_dir_path())?; - delete_files(intervals, None, self.vid_dir_path())?; - delete_files(intervals, None, self.quorum_proposals_dir_path())?; + delete_files(intervals, None, self.da2_dir_path())?; + delete_files(intervals, None, self.vid2_dir_path())?; + delete_files(intervals, None, self.quorum_proposals2_dir_path())?; // Save the most recent leaf as it will be our anchor point if the node restarts. - delete_files(intervals, Some(view_number), self.decided_leaf_path())?; + delete_files(intervals, Some(view_number), self.decided_leaf2_path())?; Ok(()) } @@ -281,7 +334,7 @@ impl Inner { // separate event for each leaf because it is possible we have non-consecutive leaves in our // storage, which would not be valid as a single decide with a single leaf chain. let mut leaves = BTreeMap::new(); - for entry in fs::read_dir(self.decided_leaf_path())? { + for entry in fs::read_dir(self.decided_leaf2_path())? { let entry = entry?; let path = entry.path(); @@ -298,7 +351,7 @@ impl Inner { let bytes = fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; let (mut leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", path.display()))?; // Include the VID share if available. 
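// A minimal sketch (not from this patch) of the idempotent-migration
// bookkeeping the fs backend wires up above: every migrate_* step consults the
// persisted `migrated` set and records itself on completion, so a restart
// never repeats finished work. `MigrationState` and `run_step` are
// hypothetical names; the sketch also assumes a single serialization format
// (bincode) on both the read and write side of the marker file, matching
// update_migration.
use std::{collections::HashSet, fs, path::PathBuf};

struct MigrationState {
    path: PathBuf, // e.g. `<storage>/migration`
    migrated: HashSet<String>,
}

impl MigrationState {
    fn load(path: PathBuf) -> anyhow::Result<Self> {
        // A missing marker file simply means no step has completed yet.
        let migrated = match fs::read(&path) {
            Ok(bytes) => bincode::deserialize(&bytes)?,
            Err(_) => HashSet::new(),
        };
        Ok(Self { path, migrated })
    }

    fn run_step(
        &mut self,
        name: &str,
        step: impl FnOnce() -> anyhow::Result<()>,
    ) -> anyhow::Result<()> {
        if self.migrated.contains(name) {
            return Ok(()); // already completed on a previous run
        }
        step()?;
        self.migrated.insert(name.to_string());
        // Persist the updated set so this step is skipped after a restart.
        fs::write(&self.path, bincode::serialize(&self.migrated)?)?;
        Ok(())
    }
}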
@@ -321,7 +374,7 @@ impl Inner { } let info = LeafInfo { - leaf: leaf.into(), + leaf, vid_share: vid_share.map(Into::into), // Note: the following fields are not used in Decide event processing, and should be @@ -352,7 +405,7 @@ impl Inner { .handle_event(&Event { view_number: ViewNumber::new(view), event: EventType::Decide { - qc: Arc::new(qc.to_qc2()), + qc: Arc::new(qc), leaf_chain: Arc::new(vec![leaf]), block_size: None, }, @@ -385,7 +438,7 @@ impl Inner { &self, view: ViewNumber, ) -> anyhow::Result>>> { - let dir_path = self.da_dir_path(); + let dir_path = self.da2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -404,7 +457,7 @@ impl Inner { &self, view: ViewNumber, ) -> anyhow::Result>>> { - let dir_path = self.vid_dir_path(); + let dir_path = self.vid2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -423,24 +476,20 @@ impl Inner { let mut anchor: Option<(Leaf2, QuorumCertificate2)> = None; // Return the latest decided leaf. - for entry in - fs::read_dir(self.decided_leaf_path()).context("opening decided leaf directory")? + for entry in fs::read_dir(self.decided_leaf2_path()) + .context("opening decided leaf2 directory")? { let file = entry.context("reading decided leaf directory")?.path(); let bytes = fs::read(&file).context(format!("reading decided leaf {}", file.display()))?; - let (leaf, qc) = - bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + let (leaf2, qc2) = + bincode::deserialize::<(Leaf2, QuorumCertificate2)>(&bytes) .context(format!("parsing decided leaf {}", file.display()))?; if let Some((anchor_leaf, _)) = &anchor { - if leaf.view_number() > anchor_leaf.view_number() { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); + if leaf2.view_number() > anchor_leaf.view_number() { anchor = Some((leaf2, qc2)); } } else { - let leaf2 = leaf.into(); - let qc2 = qc.to_qc2(); anchor = Some((leaf2, qc2)); } } @@ -511,7 +560,7 @@ impl SequencerPersistence for Persistence { consumer: &impl EventConsumer, ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; - let path = inner.decided_leaf_path(); + let path = inner.decided_leaf2_path(); // Ensure the anchor leaf directory exists. 
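// A self-contained sketch (not from this patch) of the directory scan that
// load_anchor_leaf performs above: decided leaves live in files named
// `<view>.txt`, non-numeric stems are skipped, and the entry with the highest
// view wins. `latest_view_file` is a hypothetical helper returning the raw
// bytes; the real code deserializes them as (Leaf2, QuorumCertificate2).
use std::{fs, path::Path};

fn latest_view_file(dir: &Path) -> anyhow::Result<Option<(u64, Vec<u8>)>> {
    let mut anchor: Option<(u64, Vec<u8>)> = None;
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        // Only files named `<view>.txt` participate; anything else is ignored.
        let Some(stem) = path.file_stem().and_then(|s| s.to_str()) else {
            continue;
        };
        let Ok(view) = stem.parse::<u64>() else {
            continue;
        };
        if anchor.as_ref().map_or(true, |(v, _)| view > *v) {
            anchor = Some((view, fs::read(&path)?));
        }
    }
    Ok(anchor)
}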
fs::create_dir_all(&path).context("creating anchor leaf directory")?; @@ -547,9 +596,7 @@ impl SequencerPersistence for Persistence { Ok(false) }, |mut file| { - let leaf = downgrade_leaf(info.leaf.clone()); - let qc = qc2.to_qc(); - let bytes = bincode::serialize(&(&leaf, qc))?; + let bytes = bincode::serialize(&(&info.leaf.clone(), qc2))?; file.write_all(&bytes)?; Ok(()) }, @@ -586,14 +633,14 @@ impl SequencerPersistence for Persistence { &self, ) -> anyhow::Result, BTreeMap>)>> { let inner = self.inner.read().await; - let path = inner.undecided_state_path(); + let path = inner.undecided2_state_path(); if !path.is_file() { return Ok(None); } let bytes = fs::read(&path).context("read")?; - let value: (CommitmentMap, _) = + let value: (CommitmentMap, _) = bincode::deserialize(&bytes).context("deserialize")?; - Ok(Some((upgrade_commitment_map(value.0), value.1))) + Ok(Some((value.0, value.1))) } async fn load_da_proposal( @@ -689,19 +736,17 @@ impl SequencerPersistence for Persistence { }, ) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()> { - let leaves = downgrade_commitment_map(leaves); - if !self.store_undecided_state { return Ok(()); } let mut inner = self.inner.write().await; - let path = &inner.undecided_state_path(); + let path = &inner.undecided2_state_path(); inner.replace( path, |_| { @@ -720,11 +765,9 @@ impl SequencerPersistence for Persistence { &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - let proposal: Proposal> = - convert_proposal(proposal.clone()); let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); fs::create_dir_all(dir_path.clone()).context("failed to create proposals dir")?; @@ -749,7 +792,7 @@ impl SequencerPersistence for Persistence { let inner = self.inner.read().await; // First, get the proposal directory. - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); if !dir_path.is_dir() { return Ok(Default::default()); } @@ -786,9 +829,8 @@ impl SequencerPersistence for Persistence { let proposal_bytes = fs::read(file)?; // Then, deserialize. - let proposal: Proposal> = + let proposal2: Proposal> = bincode::deserialize(&proposal_bytes)?; - let proposal2 = convert_proposal(proposal); // Push to the map and we're done. 
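// A stripped-down sketch (not from this patch) of the guard the append_*
// methods above express through Inner::replace: never overwrite an existing
// proposal file, only warn, since HotShot re-sending a view is unexpected but
// harmless. `write_unless_exists` is a hypothetical stand-in and omits
// Inner::replace's swap-file handling.
use std::{fs::OpenOptions, io::Write, path::Path};

fn write_unless_exists(path: &Path, bytes: &[u8]) -> anyhow::Result<bool> {
    // create_new(true) fails atomically if the file is already present,
    // mirroring the "duplicate proposal" warnings in the code above.
    match OpenOptions::new().write(true).create_new(true).open(path) {
        Ok(mut file) => {
            file.write_all(bytes)?;
            Ok(true)
        }
        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
            tracing::warn!(?path, "duplicate entry; keeping existing file");
            Ok(false)
        }
        Err(e) => Err(e.into()),
    }
}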
map.insert(view_number, proposal2); @@ -803,11 +845,11 @@ impl SequencerPersistence for Persistence { view: ViewNumber, ) -> anyhow::Result>> { let inner = self.inner.read().await; - let dir_path = inner.quorum_proposals_dir_path(); + let dir_path = inner.quorum_proposals2_dir_path(); let file_path = dir_path.join(view.to_string()).with_extension("txt"); let bytes = fs::read(file_path)?; - let proposal: Proposal> = bincode::deserialize(&bytes)?; - let proposal2 = convert_proposal(proposal); + let proposal2: Proposal> = + bincode::deserialize(&bytes)?; Ok(proposal2) } @@ -889,29 +931,321 @@ impl SequencerPersistence for Persistence { &self, proposal: &Proposal>, ) -> anyhow::Result<()> { - Ok(()) + let mut inner = self.inner.write().await; + let view_number = proposal.data.view_number().u64(); + let dir_path = inner.vid2_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create vid2 dir")?; + + let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( + &file_path, + |_| { + // Don't overwrite an existing share, but warn about it as this is likely not intended + // behavior from HotShot. + tracing::warn!(view_number, "duplicate VID share"); + Ok(false) + }, + |mut file| { + let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) } async fn append_da2( &self, proposal: &Proposal>, - vid_commit: ::Commit, + _vid_commit: ::Commit, ) -> anyhow::Result<()> { - Ok(()) + let mut inner = self.inner.write().await; + let view_number = proposal.data.view_number().u64(); + let dir_path = inner.da2_dir_path(); + + fs::create_dir_all(dir_path.clone()).context("failed to create da dir")?; + + let file_path = dir_path.join(view_number.to_string()).with_extension("txt"); + inner.replace( + &file_path, + |_| { + // Don't overwrite an existing proposal, but warn about it as this is likely not + // intended behavior from HotShot. + tracing::warn!(view_number, "duplicate DA proposal"); + Ok(false) + }, + |mut file| { + let proposal_bytes = bincode::serialize(&proposal).context("serialize proposal")?; + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) } async fn append_proposal2( &self, proposal: &Proposal>, ) -> anyhow::Result<()> { + self.append_quorum_proposal2(proposal).await + } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("anchor_leaf") { + return Ok(()); + } + + let decided_leaf2_path = inner.decided_leaf2_path(); + + fs::create_dir_all(decided_leaf2_path.clone()) + .context("failed to create anchor leaf 2 dir")?; + + let decided_leaf_path = inner.decided_leaf_path(); + if !decided_leaf_path.is_dir() { + return Ok(()); + } + + for entry in fs::read_dir(decided_leaf_path)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading decided leaf {}", path.display()))?; + let (leaf, qc) = bincode::deserialize::<(Leaf, QuorumCertificate)>(&bytes) + .context(format!("parsing decided leaf {}", path.display()))?; + + let leaf2: Leaf2 = leaf.into(); + let qc2 = qc.to_qc2(); + + let file_path = decided_leaf2_path + .join(view.to_string()) + .with_extension("txt"); + + inner.replace( + &file_path, + |_| { + tracing::warn!(view, "duplicate decided leaf"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&(&leaf2.clone(), qc2))?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + } + + inner.migrated.insert("anchor_leaf".to_string()); + inner.update_migration()?; + Ok(()) } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; - async fn update_undecided_state2( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()> { + if inner.migrated.contains("da_proposal") { + return Ok(()); + } + + let da2_path = inner.da2_dir_path(); + + fs::create_dir_all(da2_path.clone()).context("failed to create da proposals 2 dir")?; + + let da_dir = inner.da_dir_path(); + if !da_dir.is_dir() { + return Ok(()); + } + + for entry in fs::read_dir(da_dir)? { + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading da proposal {}", path.display()))?; + let proposal = bincode::deserialize::>>(&bytes) + .context(format!("parsing da proposal {}", path.display()))?; + + let file_path = da2_path.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = convert_proposal(proposal); + + inner.replace( + &file_path, + |_| { + tracing::warn!(view, "duplicate DA proposal 2"); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + } + + inner.migrated.insert("da_proposal".to_string()); + inner.update_migration() + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("vid_share") { + return Ok(()); + } + + let vid2_path = inner.vid2_dir_path(); + + fs::create_dir_all(vid2_path.clone()).context("failed to create vid shares 2 dir")?; + + let vid_dir = inner.vid_dir_path(); + if !vid_dir.is_dir() { + return Ok(()); + } + + for entry in fs::read_dir(vid_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = fs::read(&path).context(format!("reading vid share {}", path.display()))?; + let proposal = + bincode::deserialize::>>(&bytes) + .context(format!("parsing vid share {}", path.display()))?; + + let file_path = vid2_path.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = + convert_proposal(proposal); + + inner.replace( + &file_path, + |_| { + tracing::warn!(view, "duplicate VID share "); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + } + + inner.migrated.insert("vid_share".to_string()); + inner.update_migration() + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + if inner.migrated.contains("undecided_state") { + return Ok(()); + } + + let undecided_state2_path = &inner.undecided2_state_path(); + + let undecided_state_path = inner.undecided_state_path(); + + if !undecided_state_path.is_file() { + return Ok(()); + } + + let bytes = fs::read(&undecided_state_path).context("read")?; + let (leaves, state): (CommitmentMap, QuorumCertificate) = + bincode::deserialize(&bytes).context("deserialize")?; + + let leaves2 = upgrade_commitment_map(leaves); + let state2 = state.to_qc2(); + + inner.replace( + undecided_state2_path, + |_| { + // Always overwrite the previous file. + Ok(true) + }, + |mut file| { + let bytes = bincode::serialize(&(leaves2, state2)) + .context("serializing undecided state2")?; + file.write_all(&bytes)?; + Ok(()) + }, + ) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + let mut inner = self.inner.write().await; + + if inner.migrated.contains("quorum_proposals") { + return Ok(()); + } + + let qp2_path = inner.quorum_proposals2_dir_path(); + + fs::create_dir_all(qp2_path.clone()).context("failed to create quorum proposals 2 dir")?; + + let qp_dir = inner.quorum_proposals_dir_path(); + if !qp_dir.is_dir() { + return Ok(()); + } + + for entry in fs::read_dir(qp_dir)? 
{ + let entry = entry?; + let path = entry.path(); + + let Some(file) = path.file_stem().and_then(|n| n.to_str()) else { + continue; + }; + let Ok(view) = file.parse::() else { + continue; + }; + + let bytes = + fs::read(&path).context(format!("reading quorum proposal {}", path.display()))?; + let proposal = + bincode::deserialize::>>(&bytes) + .context(format!("parsing quorum proposal {}", path.display()))?; + + let file_path = qp2_path.join(view.to_string()).with_extension("txt"); + + let proposal2: Proposal> = + convert_proposal(proposal); + + inner.replace( + &file_path, + |_| { + tracing::warn!(view, "duplicate Quorum proposal2 "); + Ok(false) + }, + |mut file| { + let bytes = bincode::serialize(&proposal2)?; + file.write_all(&bytes)?; + Ok(()) + }, + )?; + } + + inner.migrated.insert("quorum_proposals".to_string()); + inner.update_migration() + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { Ok(()) } } @@ -1008,10 +1342,25 @@ mod generic_tests { #[cfg(test)] mod test { + use sequencer_utils::test_utils::setup_test; use serde_json::json; + use std::marker::PhantomData; use super::*; + use crate::{persistence::testing::TestablePersistence, BLSPubKey}; + use committable::Committable; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{Header, Leaf, NodeState, ValidatedState}; + + use hotshot_types::{ + simple_certificate::QuorumCertificate, + simple_vote::QuorumData, + traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, + vid::vid_scheme, + }; + use jf_vid::VidScheme; + #[test] fn test_config_migrations_add_builder_urls() { let before = json!({ @@ -1109,4 +1458,211 @@ mod test { assert_eq!(migrate_network_config(before.clone()).unwrap(), before); } + + #[tokio::test(flavor = "multi_thread")] + pub async fn test_consensus_migration() { + setup_test(); + let rows = 300; + let tmp = Persistence::tmp_storage().await; + let mut opt = Persistence::options(&tmp); + let storage = opt.create().await.unwrap(); + + let inner = storage.inner.read().await; + + let decided_leaves_path = inner.decided_leaf_path(); + fs::create_dir_all(decided_leaves_path.clone()).expect("failed to create proposals dir"); + + let qp_dir_path = inner.quorum_proposals_dir_path(); + fs::create_dir_all(qp_dir_path.clone()).expect("failed to create proposals dir"); + drop(inner); + + for i in 0..rows { + let view = ViewNumber::new(i); + let validated_state = ValidatedState::default(); + let instance_state = NodeState::default(); + + let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], i); + let (payload, metadata) = + Payload::from_transactions([], &validated_state, &instance_state) + .await + .unwrap(); + let builder_commitment = payload.builder_commitment(&metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment(&payload_bytes, 4); + + let block_header = Header::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::::default_commitment_no_preimage(), + }; + + let justify_qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: justify_qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let quorum_proposal_signature = + BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) + 
.expect("Failed to sign quorum proposal"); + + let proposal = Proposal { + data: quorum_proposal.clone(), + signature: quorum_proposal_signature, + _pd: PhantomData, + }; + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload(payload, 4).unwrap(); + + let mut inner = storage.inner.write().await; + + tracing::debug!("inserting decided leaves"); + let file_path = decided_leaves_path + .join(view.to_string()) + .with_extension("txt"); + + tracing::debug!("inserting decided leaves"); + + inner + .replace( + &file_path, + |_| Ok(true), + |mut file| { + let bytes = bincode::serialize(&(&leaf.clone(), justify_qc))?; + file.write_all(&bytes)?; + Ok(()) + }, + ) + .expect("replace decided leaves"); + + let file_path = qp_dir_path.join(view.to_string()).with_extension("txt"); + + tracing::debug!("inserting qc for {view}"); + + inner + .replace( + &file_path, + |_| Ok(true), + |mut file| { + let proposal_bytes = + bincode::serialize(&proposal).context("serialize proposal")?; + + file.write_all(&proposal_bytes)?; + Ok(()) + }, + ) + .unwrap(); + + drop(inner); + let disperse = vid_scheme(4).disperse(payload_bytes.clone()).unwrap(); + + let vid = VidDisperseShare:: { + view_number: ViewNumber::new(i), + payload_commitment: Default::default(), + share: disperse.shares[0].clone(), + common: disperse.common, + recipient_key: pubkey, + }; + + let (payload, metadata) = + Payload::from_transactions([], &ValidatedState::default(), &NodeState::default()) + .await + .unwrap(); + + let da = DaProposal:: { + encoded_transactions: payload.encode(), + metadata, + view_number: ViewNumber::new(i), + }; + + let block_payload_signature = + BLSPubKey::sign(&privkey, &payload_bytes).expect("Failed to sign block payload"); + + let da_proposal = Proposal { + data: da, + signature: block_payload_signature, + _pd: Default::default(), + }; + + tracing::debug!("inserting vid for {view}"); + storage + .append_vid(&vid.to_proposal(&privkey).unwrap()) + .await + .unwrap(); + + tracing::debug!("inserting da for {view}"); + storage + .append_da(&da_proposal, disperse.commit) + .await + .unwrap(); + } + + let qp_fn = |v: Proposal>| { + let qc = v.data; + + let qc2 = qc.into(); + + Proposal { + data: qc2, + signature: v.signature, + _pd: PhantomData, + } + }; + + storage.migrate_consensus(Leaf2::from, qp_fn).await.unwrap(); + let inner = storage.inner.read().await; + let decided_leaves = fs::read_dir(inner.decided_leaf2_path()).unwrap(); + let decided_leaves_count = decided_leaves + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + decided_leaves_count, rows as usize, + "decided leaves count does not match", + ); + + let da_proposals = fs::read_dir(inner.da2_dir_path()).unwrap(); + let da_proposals_count = da_proposals + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + da_proposals_count, rows as usize, + "da proposals does not match", + ); + + let vids = fs::read_dir(inner.vid2_dir_path()).unwrap(); + let vids_count = vids + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!(vids_count, rows as usize, "vid shares count does not match",); + + let qps = fs::read_dir(inner.quorum_proposals2_dir_path()).unwrap(); + let qps_count = qps + .filter_map(Result::ok) + .filter(|e| e.path().is_file()) + .count(); + assert_eq!( + qps_count, rows as usize, + "quorum proposals count does not match", + ); + } } diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 
d5feecfe65..2ec615dd75 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -141,7 +141,7 @@ impl SequencerPersistence for NoStorage { async fn record_action(&self, _view: ViewNumber, _action: HotShotAction) -> anyhow::Result<()> { Ok(()) } - async fn update_undecided_state( + async fn update_undecided_state2( &self, _leaves: CommitmentMap, _state: BTreeMap>, @@ -195,12 +195,4 @@ impl SequencerPersistence for NoStorage { ) -> anyhow::Result<()> { Ok(()) } - - async fn update_undecided_state2( - &self, - _leaves: CommitmentMap, - _state: BTreeMap>, - ) -> anyhow::Result<()> { - Ok(()) - } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 65612c00ac..33e6ae95d6 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1200,28 +1200,7 @@ impl SequencerPersistence for Persistence { tx.execute(query(&stmt).bind(view.u64() as i64)).await?; tx.commit().await } - async fn update_undecided_state( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()> { - if !self.store_undecided_state { - return Ok(()); - } - - let leaves_bytes = bincode::serialize(&leaves).context("serializing leaves")?; - let state_bytes = bincode::serialize(&state).context("serializing state")?; - let mut tx = self.db.write().await?; - tx.upsert( - "undecided_state2", - ["id", "leaves", "state"], - ["id"], - [(0_i32, leaves_bytes, state_bytes)], - ) - .await?; - tx.commit().await - } async fn append_quorum_proposal2( &self, proposal: &Proposal>, @@ -2037,7 +2016,12 @@ mod test { use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::EpochNumber, message::convert_proposal, simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, vid::vid_scheme + data::EpochNumber, + message::convert_proposal, + simple_certificate::QuorumCertificate, + simple_vote::QuorumData, + traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, + vid::vid_scheme, }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 32a9f32b6b..a3d96ec2b3 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -672,7 +672,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { vid_commit: ::Commit, ) -> anyhow::Result<()>; async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()>; - async fn update_undecided_state( + + async fn update_undecided_state2( &self, leaves: CommitmentMap, state: BTreeMap>, @@ -692,6 +693,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Proposal>, ) -> Proposal>, ) -> anyhow::Result<()> { + tracing::info!("migrating consensus data"); + self.migrate_anchor_leaf().await?; self.migrate_da_proposals().await?; self.migrate_vid_shares().await?; @@ -752,12 +755,6 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { ) -> anyhow::Result<()> { self.append_quorum_proposal2(proposal).await } - - async fn update_undecided_state2( - &self, - leaves: CommitmentMap, - state: BTreeMap>, - ) -> anyhow::Result<()>; } #[async_trait] @@ -816,7 +813,7 @@ impl Storage for Arc

{ state: BTreeMap>, ) -> anyhow::Result<()> { (**self) - .update_undecided_state( + .update_undecided_state2( leaves .into_values() .map(|leaf| { @@ -828,7 +825,6 @@ impl Storage for Arc

{ ) .await } - async fn append_proposal( &self, proposal: &Proposal>, From 471f7ca80e67f5f60b43332495793f97df810eea Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 20 Jan 2025 18:05:25 +0500 Subject: [PATCH 006/120] fix queries and tests --- Cargo.lock | 6 +- Cargo.toml | 4 +- .../postgres/V501__epoch_tables.sql | 6 +- .../migrations/sqlite/V301__epoch_tables.sql | 6 +- sequencer/src/api.rs | 37 ++++----- sequencer/src/api/sql.rs | 14 ++-- sequencer/src/persistence.rs | 36 ++++----- sequencer/src/persistence/fs.rs | 2 +- sequencer/src/persistence/sql.rs | 76 +++++++++---------- sequencer/src/state.rs | 4 +- types/src/v0/traits.rs | 8 +- 11 files changed, 98 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a9df9aa6f..0904e2f913 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4250,7 +4250,7 @@ dependencies = [ [[package]] name = "hotshot-query-service" version = "0.1.76" -source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=v0.1.76#f27507fca3d54fb80c18d952c22f39b04f7d195f" +source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=ab%2Fleaf2-migration#91412f2c93dcdc007ffba619654a40178c8d6b3e" dependencies = [ "anyhow", "ark-serialize", @@ -9325,7 +9325,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.95", @@ -11795,4 +11795,4 @@ checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", -] \ No newline at end of file +] diff --git a/Cargo.toml b/Cargo.toml index 355ea195f0..d6127371b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ marketplace-builder-core = { git = "https://github.com/EspressoSystems/marketpla marketplace-builder-shared = { git = "https://github.com/EspressoSystems/marketplace-builder-core", tag = "0.1.59" } hotshot-events-service = { git = "https://github.com/EspressoSystems/hotshot-events-service.git", tag = "0.1.57" } hotshot-orchestrator = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" } -hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", tag = "v0.1.76" } +hotshot-query-service = { git = "https://github.com/EspressoSystems/hotshot-query-service", branch = "ab/leaf2-migration" } hotshot-stake-table = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" } hotshot-state-prover = { version = "0.1.0", path = "hotshot-state-prover" } hotshot-task = { git = "https://github.com/EspressoSystems/hotshot", tag = "0.5.83" } @@ -173,4 +173,4 @@ hotshot-task-impls = { git = "https://www.github.com/EspressoSystems/HotShot.git hotshot-testing = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } hotshot-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } libp2p-networking = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } -hotshot-example-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } \ No newline at end of file +hotshot-example-types = { git = "https://www.github.com/EspressoSystems/HotShot.git", tag = "0.5.83-patch1" } diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql index 987803be36..6433c495f2 100644 --- 
a/sequencer/api/migrations/postgres/V501__epoch_tables.sql +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -1,7 +1,7 @@ CREATE TABLE anchor_leaf2 ( view BIGINT PRIMARY KEY, - leaf2 BYTEA, - qc2 BYTEA + leaf BYTEA, + qc BYTEA ); @@ -51,4 +51,4 @@ CREATE TABLE epoch_migration ( completed bool DEFAULT FALSE ); -INSERT INTO epoch_migrations ("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file +INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificates'); \ No newline at end of file diff --git a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql index 327326b82e..c11ec79b0a 100644 --- a/sequencer/api/migrations/sqlite/V301__epoch_tables.sql +++ b/sequencer/api/migrations/sqlite/V301__epoch_tables.sql @@ -1,7 +1,7 @@ CREATE TABLE anchor_leaf2 ( view BIGINT PRIMARY KEY, - leaf2 BLOB, - qc2 BLOB + leaf BLOB, + qc BLOB ); @@ -51,4 +51,4 @@ CREATE TABLE epoch_migration ( completed bool NOT NULL DEFAULT FALSE ); -INSERT INTO epoch_migration ("table_name") VALUES ("anchor_leaf"), ("da_proposal"), ("vid_share"), ("undecided_state"), ("quorum_proposals"), ("quorum_certificates"); \ No newline at end of file +INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate'); \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 5acf0c8854..9077a1e1b2 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1065,8 +1065,10 @@ mod api_tests { use hotshot_query_service::availability::{ AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; + use hotshot_types::data::{DaProposal2, EpochNumber, VidDisperseShare2}; + use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ - data::{DaProposal, QuorumProposal2, VidDisperseShare}, + data::QuorumProposal2, event::LeafInfo, message::Proposal, simple_certificate::QuorumCertificate, @@ -1246,7 +1248,7 @@ mod api_tests { // Create two non-consecutive leaf chains. let mut chain1 = vec![]; - let genesis = Leaf::genesis(&Default::default(), &NodeState::mock()).await; + let genesis = Leaf2::genesis(&Default::default(), &NodeState::mock()).await; let payload = genesis.block_payload().unwrap(); let payload_bytes_arc = payload.encode(); let disperse = vid_scheme(2).disperse(payload_bytes_arc.clone()).unwrap(); @@ -1254,23 +1256,21 @@ mod api_tests { let mut quorum_proposal = QuorumProposal2:: { block_header: genesis.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, next_epoch_justify_qc: None, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let mut justify_qc = qc.clone(); for i in 0..5 { @@ -1297,25 +1297,29 @@ mod api_tests { .unwrap(); // Include VID information for each leaf. 
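// Why the seed rows in V501/V301 needed requoting (sketch, not from this
// patch): in standard SQL, double quotes delimit identifiers and single quotes
// delimit string literals, so `VALUES ("anchor_leaf")` asks the database for a
// column named anchor_leaf instead of inserting the string 'anchor_leaf'. A
// hedged sqlx sketch of the corrected Postgres seeding; `pool` and the
// ON CONFLICT clause are illustrative additions:
async fn seed_epoch_migration(pool: &sqlx::PgPool) -> sqlx::Result<()> {
    sqlx::query(
        "INSERT INTO epoch_migration (table_name) VALUES \
         ('anchor_leaf'), ('da_proposal'), ('vid_share'), \
         ('undecided_state'), ('quorum_proposals'), ('quorum_certificate') \
         ON CONFLICT (table_name) DO NOTHING",
    )
    .execute(pool)
    .await?;
    Ok(())
}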
- let share = VidDisperseShare:: { + let share = VidDisperseShare2:: { view_number: leaf.view_number(), payload_commitment, share: disperse.shares[0].clone(), common: disperse.common.clone(), recipient_key: pubkey, + epoch: EpochNumber::new(0), + target_epoch: EpochNumber::new(0), + data_epoch_payload_commitment: None, }; persistence - .append_vid(&share.to_proposal(&privkey).unwrap()) + .append_vid2(&share.to_proposal(&privkey).unwrap()) .await .unwrap(); // Include payload information for each leaf. let block_payload_signature = PubKey::sign(&privkey, &payload_bytes_arc).expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: payload_bytes_arc.clone(), metadata: payload.ns_table().clone(), view_number: leaf.view_number(), + epoch: EpochNumber::new(0), }; let da_proposal = Proposal { data: da_proposal_inner, @@ -1323,7 +1327,7 @@ mod api_tests { _pd: Default::default(), }; persistence - .append_da(&da_proposal, payload_commitment) + .append_da2(&da_proposal, payload_commitment) .await .unwrap(); } @@ -1369,8 +1373,8 @@ mod api_tests { for (leaf, qc) in chain1.iter().chain(&chain2) { tracing::info!(height = leaf.height(), "check archive"); let qd = data_source.get_leaf(leaf.height() as usize).await.await; - let stored_leaf: Leaf2 = qd.leaf().clone().into(); - let stored_qc = qd.qc().clone().to_qc2(); + let stored_leaf: Leaf2 = qd.leaf().clone(); + let stored_qc = qd.qc().clone(); assert_eq!(&stored_leaf, leaf); assert_eq!(&stored_qc, qc); @@ -1489,10 +1493,7 @@ mod api_tests { .unwrap(); // Check that we still processed the leaf. - assert_eq!( - leaf, - data_source.get_leaf(1).await.await.leaf().clone().into() - ); + assert_eq!(leaf, data_source.get_leaf(1).await.await.leaf().clone()); assert!(data_source.get_vid_common(1).await.is_pending()); assert!(data_source.get_block(1).await.is_pending()); } diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 359a3e260c..79ccdb8fb5 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -4,7 +4,7 @@ use committable::{Commitment, Committable}; use espresso_types::{ get_l1_deposits, v0_99::{ChainConfig, IterableFeeInfo}, - BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf, Leaf2, NodeState, ValidatedState, + BlockMerkleTree, FeeAccount, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use hotshot::traits::ValidatedState as _; use hotshot_query_service::{ @@ -21,7 +21,7 @@ use hotshot_query_service::{ Resolvable, }; use hotshot_types::{ - data::{QuorumProposal, ViewNumber}, + data::{QuorumProposal2, ViewNumber}, message::Proposal, traits::node_implementation::ConsensusTime, }; @@ -257,7 +257,7 @@ async fn load_accounts( } } - Ok((snapshot, leaf.leaf().clone().into())) + Ok((snapshot, leaf.leaf().clone())) } async fn load_chain_config( @@ -286,7 +286,7 @@ async fn reconstruct_state( .get_leaf((from_height as usize).into()) .await .context(format!("leaf {from_height} not available"))?; - let from_leaf: Leaf2 = from_leaf.leaf().clone().into(); + let from_leaf: Leaf2 = from_leaf.leaf().clone(); ensure!( from_leaf.view_number() < to_view, "state reconstruction: starting state {:?} must be before ending state {to_view:?}", @@ -440,13 +440,13 @@ where P: Type + for<'q> Encode<'q, Db>, { let (data,) = query_as::<(Vec,)>(&format!( - "SELECT data FROM quorum_proposals WHERE {where_clause} LIMIT 1", + "SELECT data FROM quorum_proposals2 WHERE {where_clause} LIMIT 1", )) .bind(param) .fetch_one(tx.as_mut()) .await?; - let proposal: 
Proposal> = bincode::deserialize(&data)?; - Ok(Leaf::from_quorum_proposal(&proposal.data).into()) + let proposal: Proposal> = bincode::deserialize(&data)?; + Ok(Leaf2::from_quorum_proposal(&proposal.data)) } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 8f4aefe3f6..89891c598c 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -55,13 +55,12 @@ mod persistence_tests { use hotshot::types::{BLSPubKey, SignatureKey}; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::{ - DaProposal, DaProposal2, EpochNumber, QuorumProposal2, VidDisperseShare, - VidDisperseShare2, ViewNumber, - }, + data::{DaProposal2, EpochNumber, QuorumProposal2, VidDisperseShare2, ViewNumber}, event::{EventType, HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, - simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate, UpgradeCertificate}, + simple_certificate::{ + NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, + }, simple_vote::{NextEpochQuorumData2, QuorumData2, UpgradeProposalData, VersionedVoteData}, traits::{block_contents::vid_commitment, node_implementation::ConsensusTime, EncodeBytes}, vid::vid_scheme, @@ -190,12 +189,11 @@ mod persistence_tests { data: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -652,12 +650,15 @@ mod persistence_tests { .disperse(leaf_payload_bytes_arc.clone()) .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let mut vid = VidDisperseShare:: { + let mut vid = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment: Default::default(), share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, + epoch: EpochNumber::new(0), + target_epoch: EpochNumber::new(0), + data_epoch_payload_commitment: None, } .to_proposal(&privkey) .unwrap() @@ -665,31 +666,30 @@ mod persistence_tests { let mut quorum_proposal = QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, next_epoch_justify_qc: None, }; - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); + .await; let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let mut da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: EpochNumber::new(0), }, signature: block_payload_signature, _pd: Default::default(), @@ -710,8 +710,8 @@ mod persistence_tests { // Add proposals. 
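// The fixtures above move from VidDisperseShare to VidDisperseShare2 by
// filling in the three new epoch-related fields. A sketch of that widening
// (not from this patch) with the same values the tests use (genesis epoch, no
// separate data-epoch commitment); `old` is a hypothetical legacy share:
let new_share = VidDisperseShare2::<SeqTypes> {
    view_number: old.view_number,
    payload_commitment: old.payload_commitment,
    share: old.share,
    common: old.common,
    recipient_key: old.recipient_key,
    // New in the *2 type: the epoch that produced the share, the epoch it
    // targets, and an optional payload commitment scoped to the data epoch.
    epoch: EpochNumber::new(0),
    target_epoch: EpochNumber::new(0),
    data_epoch_payload_commitment: None,
};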
for (_, _, vid, da) in &chain { tracing::info!(?da, ?vid, "insert proposal"); - storage.append_da(da, vid_commitment).await.unwrap(); - storage.append_vid(vid).await.unwrap(); + storage.append_da2(da, vid_commitment).await.unwrap(); + storage.append_vid2(vid).await.unwrap(); } // Decide 2 leaves, but fail in event processing. diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 03ece51488..385a4bf02e 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -472,7 +472,7 @@ impl Inner { } fn load_anchor_leaf(&self) -> anyhow::Result)>> { - if self.decided_leaf_path().is_dir() { + if self.decided_leaf2_path().is_dir() { let mut anchor: Option<(Leaf2, QuorumCertificate2)> = None; // Return the latest decided leaf. diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 33e6ae95d6..b0d02be632 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -632,9 +632,10 @@ impl Persistence { }; let mut parent = None; - let mut rows = query("SELECT leaf, qc FROM anchor_leaf WHERE view >= $1 ORDER BY view") - .bind(from_view) - .fetch(tx.as_mut()); + let mut rows = + query("SELECT leaf, qc FROM anchor_leaf2 WHERE view >= $1 ORDER BY view") + .bind(from_view) + .fetch(tx.as_mut()); let mut leaves = vec![]; let mut final_qc = None; while let Some(row) = rows.next().await { @@ -649,9 +650,9 @@ impl Persistence { }; let leaf_data: Vec = row.get("leaf"); - let leaf = bincode::deserialize::(&leaf_data)?; + let leaf = bincode::deserialize::(&leaf_data)?; let qc_data: Vec = row.get("qc"); - let qc = bincode::deserialize::>(&qc_data)?; + let qc = bincode::deserialize::>(&qc_data)?; let height = leaf.block_header().block_number(); // Ensure we are only dealing with a consecutive chain of leaves. We don't want to @@ -687,7 +688,7 @@ impl Persistence { // Collect VID shares for the decide event. let mut vid_shares = tx .fetch_all( - query("SELECT view, data FROM vid_share where view >= $1 AND view <= $2") + query("SELECT view, data FROM vid_share2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -697,7 +698,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let vid_proposal = bincode::deserialize::< - Proposal>, + Proposal>, >(&data)?; Ok((view as u64, vid_proposal.data)) }) @@ -706,7 +707,7 @@ impl Persistence { // Collect DA proposals for the decide event. let mut da_proposals = tx .fetch_all( - query("SELECT view, data FROM da_proposal where view >= $1 AND view <= $2") + query("SELECT view, data FROM da_proposal2 where view >= $1 AND view <= $2") .bind(from_view.u64() as i64) .bind(to_view.u64() as i64), ) @@ -716,7 +717,7 @@ impl Persistence { let view: i64 = row.get("view"); let data: Vec = row.get("data"); let da_proposal = - bincode::deserialize::>>(&data)?; + bincode::deserialize::>>(&data)?; Ok((view as u64, da_proposal.data)) }) .collect::>>()?; @@ -751,7 +752,7 @@ impl Persistence { } LeafInfo { - leaf: leaf.into(), + leaf, vid_share: vid_share.map(Into::into), // Note: the following fields are not used in Decide event processing, and // should be removed. For now, we just default them. @@ -768,7 +769,7 @@ impl Persistence { view_number: to_view, event: EventType::Decide { leaf_chain: Arc::new(leaf_chain), - qc: Arc::new(final_qc.to_qc2()), + qc: Arc::new(final_qc), block_size: None, }, }) @@ -791,25 +792,25 @@ impl Persistence { // Delete the data that has been fully processed. 
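// A sketch (not from this patch) of the consecutive-chain guard applied while
// the decide-event loop walks anchor_leaf2 rows in view order: garbage
// collection must stop at the first height gap so leaves that have not been
// processed yet are never deleted. `consecutive_prefix` is a hypothetical
// helper over the block numbers pulled off each row:
fn consecutive_prefix(heights: &[u64]) -> &[u64] {
    let mut end = 0;
    let mut parent: Option<u64> = None;
    for (i, h) in heights.iter().enumerate() {
        if let Some(p) = parent {
            if *h != p + 1 {
                break; // gap: this row and everything after it stays in storage
            }
        }
        parent = Some(*h);
        end = i + 1;
    }
    &heights[..end]
}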
tx.execute(
-            query("DELETE FROM vid_share where view >= $1 AND view <= $2")
+            query("DELETE FROM vid_share2 where view >= $1 AND view <= $2")
                .bind(from_view.u64() as i64)
                .bind(to_view.u64() as i64),
        )
        .await?;
        tx.execute(
-            query("DELETE FROM da_proposal where view >= $1 AND view <= $2")
+            query("DELETE FROM da_proposal2 where view >= $1 AND view <= $2")
                .bind(from_view.u64() as i64)
                .bind(to_view.u64() as i64),
        )
        .await?;
        tx.execute(
-            query("DELETE FROM quorum_proposals where view >= $1 AND view <= $2")
+            query("DELETE FROM quorum_proposals2 where view >= $1 AND view <= $2")
                .bind(from_view.u64() as i64)
                .bind(to_view.u64() as i64),
        )
        .await?;
        tx.execute(
-            query("DELETE FROM quorum_certificate where view >= $1 AND view <= $2")
+            query("DELETE FROM quorum_certificate2 where view >= $1 AND view <= $2")
                .bind(from_view.u64() as i64)
                .bind(to_view.u64() as i64),
        )
@@ -819,7 +820,7 @@ impl Persistence {
        // less than the given value). This is necessary to ensure that, in case of a restart,
        // we can resume from the last decided leaf.
        tx.execute(
-            query("DELETE FROM anchor_leaf WHERE view >= $1 AND view < $2")
+            query("DELETE FROM anchor_leaf2 WHERE view >= $1 AND view < $2")
                .bind(from_view.u64() as i64)
                .bind(to_view.u64() as i64),
        )
@@ -977,7 +978,7 @@ impl SequencerPersistence for Persistence {
        // event consumer later fails, there is no need to abort the storage of the leaves.
        let mut tx = self.db.write().await?;
 
-        tx.upsert("anchor_leaf2", ["view", "leaf2", "qc2"], ["view"], values)
+        tx.upsert("anchor_leaf2", ["view", "leaf", "qc"], ["view"], values)
            .await?;
        tx.commit().await?;
@@ -1020,16 +1021,16 @@ impl SequencerPersistence for Persistence {
            .db
            .read()
            .await?
-            .fetch_optional("SELECT leaf2, qc2 FROM anchor_leaf2 ORDER BY view DESC LIMIT 1")
+            .fetch_optional("SELECT leaf, qc FROM anchor_leaf2 ORDER BY view DESC LIMIT 1")
            .await?
        else {
            return Ok(None);
        };
 
-        let leaf_bytes: Vec<u8> = row.get("leaf2");
+        let leaf_bytes: Vec<u8> = row.get("leaf");
        let leaf2: Leaf2 = bincode::deserialize(&leaf_bytes)?;
 
-        let qc_bytes: Vec<u8> = row.get("qc2");
+        let qc_bytes: Vec<u8> = row.get("qc");
        let qc2: QuorumCertificate2<SeqTypes> = bincode::deserialize(&qc_bytes)?;
 
        Ok(Some((leaf2, qc2)))
@@ -1037,7 +1038,7 @@ impl SequencerPersistence for Persistence {
 
    async fn load_anchor_view(&self) -> anyhow::Result<ViewNumber> {
        let mut tx = self.db.read().await?;
-        let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf")
+        let (view,) = query_as::<(i64,)>("SELECT coalesce(max(view), 0) FROM anchor_leaf2")
            .fetch_one(tx.as_mut())
            .await?;
        Ok(ViewNumber::new(view as u64))
@@ -1074,7 +1075,7 @@ impl SequencerPersistence for Persistence {
        .read()
        .await?
        .fetch_optional(
-            query("SELECT data FROM da_proposal where view = $1").bind(view.u64() as i64),
+            query("SELECT data FROM da_proposal2 where view = $1").bind(view.u64() as i64),
        )
        .await?;
 
@@ -1095,7 +1096,7 @@ impl SequencerPersistence for Persistence {
        .read()
        .await?
.fetch_optional(
-            query("SELECT data FROM vid_share where view = $1").bind(view.u64() as i64),
+            query("SELECT data FROM vid_share2 where view = $1").bind(view.u64() as i64),
        )
        .await?;
 
@@ -1323,7 +1324,7 @@ impl SequencerPersistence for Persistence {
        }
 
        let mut query_builder: sqlx::QueryBuilder<Db> =
-            sqlx::QueryBuilder::new("INSERT INTO anchor_leaf2 (view, leaf2, qc2) ");
+            sqlx::QueryBuilder::new("INSERT INTO anchor_leaf2 (view, leaf, qc) ");
 
        query_builder.push_values(values.into_iter(), |mut b, (view, leaf, qc)| {
            b.push_bind(view).push_bind(leaf).push_bind(qc);
@@ -1649,7 +1650,7 @@ impl SequencerPersistence for Persistence {
        let mut tx = self.db.read().await?;
 
        let (is_completed,) = query_as::<(bool,)>(
-            "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificates'",
+            "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificate'",
        )
        .fetch_one(tx.as_mut())
        .await?;
@@ -1710,7 +1711,7 @@ impl SequencerPersistence for Persistence {
            "epoch_migration",
            ["table_name", "completed"],
            ["table_name"],
-            [("quorum_certificates".to_string(), true)],
+            [("quorum_certificate".to_string(), true)],
        )
        .await?;
        tx.commit().await?;
@@ -1929,10 +1930,10 @@ impl Provider<SeqTypes, LeafRequest<SeqTypes>> for Persistence {
 async fn fetch_leaf_from_proposals<Mode: TransactionMode>(
    tx: &mut Transaction<Mode>,
    req: LeafRequest<SeqTypes>,
-) -> anyhow::Result<(Leaf, QuorumCertificate<SeqTypes>)> {
+) -> anyhow::Result<(Leaf2, QuorumCertificate2<SeqTypes>)> {
    // Look for a quorum proposal corresponding to this leaf.
    let (proposal_bytes,) =
-        query_as::<(Vec<u8>,)>("SELECT data FROM quorum_proposals WHERE leaf_hash = $1 LIMIT 1")
+        query_as::<(Vec<u8>,)>("SELECT data FROM quorum_proposals2 WHERE leaf_hash = $1 LIMIT 1")
            .bind(req.expected_leaf.to_string())
            .fetch_one(tx.as_mut())
            .await
 
    // Look for a QC corresponding to this leaf.
    let (qc_bytes,) =
-        query_as::<(Vec<u8>,)>("SELECT data FROM quorum_certificate WHERE leaf_hash = $1 LIMIT 1")
+        query_as::<(Vec<u8>,)>("SELECT data FROM quorum_certificate2 WHERE leaf_hash = $1 LIMIT 1")
            .bind(req.expected_leaf.to_string())
            .fetch_one(tx.as_mut())
            .await
            .context("fetching QC")?;
 
-    let proposal: Proposal<QuorumProposal<SeqTypes>> =
+    let proposal: Proposal<QuorumProposal2<SeqTypes>> =
        bincode::deserialize(&proposal_bytes).context("deserializing quorum proposal")?;
-    let qc: QuorumCertificate<SeqTypes> =
+    let qc: QuorumCertificate2<SeqTypes> =
        bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?;
 
-    let leaf = Leaf::from_quorum_proposal(&proposal.data);
+    let leaf = Leaf2::from_quorum_proposal(&proposal.data);
    Ok((leaf, qc))
 }
@@ -2112,7 +2113,7 @@ mod test {
        let storage = Persistence::connect(&tmp).await;
 
        // Mock up some data.
-        let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await;
+        let leaf = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await;
         let leaf_payload = leaf.block_payload().unwrap();
         let leaf_payload_bytes_arc = leaf_payload.encode();
         let disperse = vid_scheme(2)
@@ -2134,7 +2135,7 @@ mod test {
         let quorum_proposal = QuorumProposal2::<SeqTypes> {
             block_header: leaf.block_header().clone(),
             view_number: leaf.view_number(),
-            justify_qc: leaf.justify_qc().to_qc2(),
+            justify_qc: leaf.justify_qc(),
             upgrade_certificate: None,
             view_change_evidence: None,
             next_drb_result: None,
@@ -2164,8 +2165,7 @@ mod test {
         let mut next_quorum_proposal = quorum_proposal.clone();
         next_quorum_proposal.data.view_number += 1;
         next_quorum_proposal.data.justify_qc.view_number += 1;
-        next_quorum_proposal.data.justify_qc.data.leaf_commit =
-            Committable::commit(&leaf.clone().into());
+        next_quorum_proposal.data.justify_qc.data.leaf_commit = Committable::commit(&leaf.clone());
         let qc = &next_quorum_proposal.data.justify_qc;
 
         // Add to database.
@@ -2201,12 +2201,12 @@ mod test {
             .unwrap()
         );
         assert_eq!(
-            LeafQueryData::new(leaf.clone(), qc.clone().to_qc()).unwrap(),
+            LeafQueryData::new(leaf.clone(), qc.clone()).unwrap(),
             storage
                 .fetch(LeafRequest::new(
                     leaf.block_header().block_number(),
                     Committable::commit(&leaf),
-                    qc.clone().to_qc().commit()
+                    qc.clone().commit()
                 ))
                 .await
                 .unwrap()
diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs
index e4b3abe6e3..362589efad 100644
--- a/sequencer/src/state.rs
+++ b/sequencer/src/state.rs
@@ -155,8 +155,8 @@ where
             parent_state,
             instance,
             peers,
-            &parent_leaf.leaf().clone().into(),
-            &proposed_leaf.leaf().clone().into(),
+            &parent_leaf.leaf().clone(),
+            &proposed_leaf.leaf().clone(),
         )
         .await
         .context("computing state update")?;
diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs
index a3d96ec2b3..33a8be3dcb 100644
--- a/types/src/v0/traits.rs
+++ b/types/src/v0/traits.rs
@@ -523,12 +523,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static {
             None => {
                 tracing::info!("no saved leaf, starting from genesis leaf");
                 (
-                    hotshot_types::data::Leaf::genesis(&genesis_validated_state, &state)
-                        .await
-                        .into(),
-                    QuorumCertificate::genesis::<V>(&genesis_validated_state, &state)
-                        .await
-                        .to_qc2(),
+                    hotshot_types::data::Leaf2::genesis(&genesis_validated_state, &state).await,
+                    QuorumCertificate2::genesis::<V>(&genesis_validated_state, &state).await,
                     None,
                 )
             }

From ff3d533016a51b3971ec00b9ec64f1b504779060 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 20 Jan 2025 19:28:22 +0500
Subject: [PATCH 007/120] sequencer sqlite lock file

---
 sequencer-sqlite/Cargo.lock | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock
index d7b35400fb..5dd4ee9a89 100644
--- a/sequencer-sqlite/Cargo.lock
+++ b/sequencer-sqlite/Cargo.lock
@@ -4078,7 +4078,7 @@ dependencies = [
 [[package]]
 name = "hotshot-query-service"
 version = "0.1.76"
-source = "git+https://github.com/EspressoSystems/hotshot-query-service?tag=v0.1.76#f27507fca3d54fb80c18d952c22f39b04f7d195f"
+source = "git+https://github.com/EspressoSystems/hotshot-query-service?branch=ab%2Fleaf2-migration#91412f2c93dcdc007ffba619654a40178c8d6b3e"
 dependencies = [
  "anyhow",
  "ark-serialize",

From c573ea61dce7db465af36b6456870edf989c7dc7 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 20 Jan 2025 23:19:17 +0500
Subject: [PATCH 008/120] fix postgres epoch migration

---
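Note on this fix: the `epoch_migration` table seeded by V501 is plain bookkeeping — one row
per legacy table, with `completed` flipped once that table's rows have been copied into its
`*2` twin — so a name mismatch between the seed and the lookups makes the migration spin
forever. A minimal sketch of how the flag is meant to be read and written, using raw sqlx
against the same schema (names follow this series; the real code goes through the
sequencer's transaction wrappers, so treat this purely as illustration):

    use anyhow::Result;
    use sqlx::{query, query_as, PgConnection};

    /// True once `table` has been copied into its `*2` counterpart.
    async fn is_migrated(conn: &mut PgConnection, table: &str) -> Result<bool> {
        let (completed,): (bool,) =
            query_as("SELECT completed FROM epoch_migration WHERE table_name = $1")
                .bind(table)
                .fetch_one(&mut *conn)
                .await?;
        Ok(completed)
    }

    /// Record that `table` has been fully migrated.
    async fn mark_migrated(conn: &mut PgConnection, table: &str) -> Result<()> {
        query("UPDATE epoch_migration SET completed = TRUE WHERE table_name = $1")
            .bind(table)
            .execute(&mut *conn)
            .await?;
        Ok(())
    }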
sequencer/api/migrations/postgres/V501__epoch_tables.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql index 6433c495f2..cebfeaa7ca 100644 --- a/sequencer/api/migrations/postgres/V501__epoch_tables.sql +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -51,4 +51,4 @@ CREATE TABLE epoch_migration ( completed bool DEFAULT FALSE ); -INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificates'); \ No newline at end of file +INSERT INTO epoch_migration (table_name) VALUES ('anchor_leaf'), ('da_proposal'), ('vid_share'), ('undecided_state'), ('quorum_proposals'), ('quorum_certificate'); \ No newline at end of file From 5802184511f0e0e497c9332cc9ba559dce385077 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 21 Jan 2025 00:32:34 +0500 Subject: [PATCH 009/120] fix undecided_state2 migration --- sequencer/api/migrations/postgres/V501__epoch_tables.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/api/migrations/postgres/V501__epoch_tables.sql b/sequencer/api/migrations/postgres/V501__epoch_tables.sql index cebfeaa7ca..9a25f42c20 100644 --- a/sequencer/api/migrations/postgres/V501__epoch_tables.sql +++ b/sequencer/api/migrations/postgres/V501__epoch_tables.sql @@ -23,7 +23,7 @@ CREATE TABLE undecided_state2 ( -- update that there is only a single entry in this table: the latest known state. id INT PRIMARY KEY, - leaves2 BYTEA NOT NULL, + leaves BYTEA NOT NULL, state BYTEA NOT NULL ); From 52749511911482c06cefd87e0e6ff211ae356917 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 21 Jan 2025 18:39:02 +0500 Subject: [PATCH 010/120] epoch integration test --- .env | 1 + Cargo.lock | 2 + client/Cargo.toml | 1 + client/src/lib.rs | 19 ++++- data/genesis/demo-epoch.toml | 2 +- justfile | 4 + process-compose.yaml | 4 +- sequencer-sqlite/Cargo.lock | 1 + sequencer/api/node.toml | 4 + sequencer/src/api.rs | 8 ++ sequencer/src/api/data_source.rs | 2 + sequencer/src/run.rs | 21 ++---- tests/Cargo.toml | 1 + tests/upgrades.rs | 125 ++++++++++++++++++++++++++++--- types/src/v0/mod.rs | 2 +- utils/src/stake_table.rs | 18 ++++- 16 files changed, 181 insertions(+), 34 deletions(-) diff --git a/.env b/.env index d716154cfb..84d24e9100 100644 --- a/.env +++ b/.env @@ -152,3 +152,4 @@ INTEGRATION_TEST_SEQUENCER_VERSION=02 # max database connections ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 + \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index a1f5e28679..eddeaacf4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1731,6 +1731,7 @@ dependencies = [ "espresso-types", "ethers", "futures", + "hotshot-types", "jf-merkle-tree", "surf-disco", "tokio", @@ -10028,6 +10029,7 @@ dependencies = [ "ethers", "futures", "reqwest 0.12.12", + "sequencer-utils", "surf-disco", "tokio", "vbs", diff --git a/client/Cargo.toml b/client/Cargo.toml index 85db2033cc..c38a613844 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -15,3 +15,4 @@ surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } +hotshot-types = { workspace = true } \ No newline at end of file diff --git a/client/src/lib.rs b/client/src/lib.rs index a9d5cc995f..572d2e796e 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,7 +1,8 @@ use anyhow::Context; -use espresso_types::{FeeAccount, 
FeeAmount, FeeMerkleTree, Header};
+use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey};
 use ethers::types::Address;
 use futures::{stream::BoxStream, StreamExt};
+use hotshot_types::stake_table::StakeTableEntry;
 use jf_merkle_tree::{
     prelude::{MerkleProof, Sha3Node},
     MerkleTreeScheme,
@@ -119,6 +120,22 @@ impl SequencerClient {
         let balance = proof.elem().copied().unwrap_or(0.into());
         Ok(balance)
     }
+
+    pub async fn current_epoch(&self) -> anyhow::Result<u64> {
+        self.0
+            .get::<u64>("node/current_epoch")
+            .send()
+            .await
+            .context("getting epoch value")
+    }
+
+    pub async fn stake_table(&self, epoch: u64) -> anyhow::Result<Vec<StakeTableEntry<PubKey>>> {
+        self.0
+            .get::<_>(&format!("node/stake-table/{epoch}"))
+            .send()
+            .await
+            .context("getting stake table")
+    }
 }
 
 #[cfg(test)]
diff --git a/data/genesis/demo-epoch.toml b/data/genesis/demo-epoch.toml
index 5b351243f5..09f60756ab 100644
--- a/data/genesis/demo-epoch.toml
+++ b/data/genesis/demo-epoch.toml
@@ -18,7 +18,7 @@ timestamp = "1970-01-01T00:00:00Z"
 number = 0
 
 [[upgrade]]
-version = "0.99"
+version = "0.3"
 start_proposing_view = 10
 stop_proposing_view = 60
 
diff --git a/justfile b/justfile
index 0424edadd5..4968621575 100644
--- a/justfile
+++ b/justfile
@@ -29,6 +29,10 @@ build profile="test":
 demo-native-mp *args: build
     scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}}
 
+demo-native-epoch *args: build
+    # export DEMO_GENESIS_FILE=data/genesis/demo-epoch.toml
+    scripts/demo-native -f process-compose.yaml {{args}}
+
 demo-native-benchmark:
     cargo build --release --features benchmarking
     scripts/demo-native
diff --git a/process-compose.yaml b/process-compose.yaml
index 281319cfff..d2fa812225 100644
--- a/process-compose.yaml
+++ b/process-compose.yaml
@@ -5,8 +5,8 @@ environment:
   - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT
   - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT
   - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT
-  - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml
-  - ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo.toml
+  - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-epoch.toml
+  - ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo-epoch.toml
   - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml
   - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT
   - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/
diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock
index d7b35400fb..920a60ce78 100644
--- a/sequencer-sqlite/Cargo.lock
+++ b/sequencer-sqlite/Cargo.lock
@@ -1661,6 +1661,7 @@ dependencies = [
  "espresso-types",
  "ethers",
  "futures",
+ "hotshot-types",
  "jf-merkle-tree",
  "surf-disco",
  "tokio",
diff --git a/sequencer/api/node.toml b/sequencer/api/node.toml
index ddcdaa7db6..d2025500f7 100644
--- a/sequencer/api/node.toml
+++ b/sequencer/api/node.toml
@@ -6,3 +6,7 @@ DOC = "Get the stake table for the current epoch"
 PATH = ["stake-table/:epoch_number"]
 ":epoch_number" = "Integer"
 DOC = "Get the stake table for the given epoch"
+
+[route.current_epoch]
+PATH = ["current_epoch"]
+DOC = "Get the current epoch"
\ No newline at end of file
diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index dee017a6fa..7e4b227556 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -170,6 +170,10 @@ impl<N: ConnectedNetwork<PubKey>, D: Sync, V: Versions, P: SequencerPersistence>
     ) -> Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>> {
         self.as_ref().get_stake_table(epoch).await
     }
+
+    async fn get_current_epoch(&self) -> <SeqTypes as NodeType>::Epoch {
+        self.as_ref().get_current_epoch().await
+    }
 }
 
 impl<N: ConnectedNetwork<PubKey>, V: Versions, P: SequencerPersistence>
@@ -196,6 +200,10 @@ impl<N: ConnectedNetwork<PubKey>, V: Versions, P: SequencerPersistence>
             .await
             .stake_table(epoch)
     }
+
+    async fn get_current_epoch(&self) -> <SeqTypes as NodeType>::Epoch {
+        self.consensus().await.read().await.cur_epoch().await
+    }
 }
 
 impl<N: ConnectedNetwork<PubKey>, V: Versions, P: SequencerPersistence> SubmitDataSource
diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs
index d26dce301c..2029871687 100644
--- a/sequencer/src/api/data_source.rs
+++ b/sequencer/src/api/data_source.rs
@@ -122,6 +122,8 @@ pub(crate) trait StakeTableDataSource {
         &self,
         epoch: Option<<SeqTypes as NodeType>::Epoch>,
     ) -> impl Send + Future<Output = Vec<PeerConfig<PubKey>>>;
+
+    fn get_current_epoch(&self) -> impl Send + Future<Output = <SeqTypes as NodeType>::Epoch>;
 }
 
 pub(crate) trait CatchupDataSource: Sync {
diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs
index 59cf5dcafc..d1cf4e3425 100644
--- a/sequencer/src/run.rs
+++ b/sequencer/src/run.rs
@@ -9,8 +9,8 @@ use super::{
 };
 use clap::Parser;
 use espresso_types::{
-    traits::NullEventConsumer, FeeVersion, MarketplaceVersion, SequencerVersions,
-    SolverAuctionResultsProvider, V0_0,
+    traits::NullEventConsumer, EpochVersion, FeeVersion, MarketplaceVersion, SequencerVersions,
+    SolverAuctionResultsProvider,
 };
 use futures::future::FutureExt;
 use hotshot::MarketplaceConfig;
@@ -38,30 +38,21 @@ pub async fn main() -> anyhow::Result<()> {
     let upgrade = genesis.upgrade_version;
 
     match (base, upgrade) {
-        (FeeVersion::VERSION, MarketplaceVersion::VERSION) => {
+        (FeeVersion::VERSION, EpochVersion::VERSION) => {
             run(
                 genesis,
                 modules,
                 opt,
-                SequencerVersions::<FeeVersion, MarketplaceVersion>::new(),
+                SequencerVersions::<FeeVersion, EpochVersion>::new(),
             )
             .await
         }
-        (FeeVersion::VERSION, _) => {
-            run(
-                genesis,
-                modules,
-                opt,
-                SequencerVersions::<FeeVersion, V0_0>::new(),
-            )
-            .await
-        }
-        (MarketplaceVersion::VERSION, _) => {
+        (FeeVersion::VERSION, MarketplaceVersion::VERSION) => {
             run(
                 genesis,
                 modules,
                 opt,
-                SequencerVersions::<MarketplaceVersion, V0_0>::new(),
+                SequencerVersions::<FeeVersion, MarketplaceVersion>::new(),
             )
             .await
         }
diff --git a/tests/Cargo.toml b/tests/Cargo.toml
index eeb91c14a7..09ae39b76c 100644
--- a/tests/Cargo.toml
+++ b/tests/Cargo.toml
@@ -20,3 +20,4 @@ reqwest = { workspace = true, features = ["json"] }
 surf-disco = { workspace = true }
 tokio = { workspace = true }
 vbs = { workspace = true }
+sequencer-utils = { path = "../utils" }
\ No newline at end of file
diff --git a/tests/upgrades.rs b/tests/upgrades.rs
index 5d68938440..93e61bf347 100644
--- a/tests/upgrades.rs
+++ b/tests/upgrades.rs
@@ -1,8 +1,14 @@
+use std::{path::Path, time::Duration};
+
 use crate::common::TestConfig;
 use anyhow::Result;
-use espresso_types::{FeeVersion, MarketplaceVersion};
+use client::SequencerClient;
+use dotenvy::var;
+use espresso_types::{EpochVersion, FeeVersion, MarketplaceVersion};
 use futures::{future::join_all, StreamExt};
-use vbs::version::StaticVersionType;
+use sequencer_utils::stake_table::{update_stake_table, PermissionedStakeTableUpdate};
+use tokio::time::sleep;
+use vbs::version::{StaticVersionType, Version};
 
 const SEQUENCER_BLOCKS_TIMEOUT: u64 = 200;
 
@@ -12,10 +18,10 @@ async fn test_upgrade() -> Result<()> {
 
     let testing = TestConfig::new().await.unwrap();
 
-    let versions = if testing.sequencer_version >= 3 {
-        (FeeVersion::version(), MarketplaceVersion::version())
-    } else {
-        panic!("Invalid sequencer version provided for upgrade test.");
+    let (base, upgrade) = match testing.sequencer_version {
+        3 => (FeeVersion::version(), EpochVersion::version()),
+        version if version > 3 => (FeeVersion::version(), MarketplaceVersion::version()),
+        _ => panic!("Invalid sequencer version provided for upgrade test."),
     };
 
     println!("Waiting on readiness");
@@ -26,6 +32,23 @@ async fn test_upgrade() -> Result<()> {
 
     let clients = testing.sequencer_clients;
 
+    let height = test_header_version(clients.clone(), base, upgrade).await?;
+    // check that at least 50 blocks are produced after the upgrade
+    test_blocks_production(clients.clone(), height, 50).await?;
+
+    if upgrade == EpochVersion::version() {
+        test_stake_table_update(clients).await?;
+    }
+
+    // TODO assert transactions are incrementing
+    Ok(())
+}
+
+async fn test_header_version(
+    clients: Vec<SequencerClient>,
+    base: Version,
+    upgrade: Version,
+) -> Result<u64> {
     // Test is limited to those sequencers with correct modules
     // enabled. It would be less fragile if we could discover them.
     let subscriptions = join_all(clients.iter().map(|c| c.subscribe_headers(0)))
@@ -34,7 +57,7 @@
         .collect::<Result<Vec<_>>>()?;
 
     let mut stream = futures::stream::iter(subscriptions).flatten_unordered(None);
-
+    let mut height = 0;
     while let Some(header) = stream.next().await {
         let header = header.unwrap();
         println!(
@@ -46,11 +69,12 @@
         // TODO is it possible to discover the view at which upgrade should be finished?
         // First few views should be `Base` version.
        if header.height() <= 20 {
-            assert_eq!(header.version(), versions.0)
+            assert_eq!(header.version(), base)
        }
 
-        if header.version() == versions.1 {
+        if header.version() == upgrade {
            println!("header version matched! height={:?}", header.height());
+            height = header.height();
            break;
        }
@@ -59,6 +83,87 @@
        }
    }
 
-    // TODO assert transactions are incrementing
+    Ok(height)
+}
+
+async fn test_blocks_production(clients: Vec<SequencerClient>, from: u64, num: u64) -> Result<()> {
+    let subscriptions = join_all(clients.iter().map(|c| c.subscribe_blocks(from)))
+        .await
+        .into_iter()
+        .collect::<Result<Vec<_>>>()?;
+
+    let mut num_blocks = 0;
+
+    for mut node in subscriptions {
+        while let Some(block) = node.next().await {
+            let _block = block.unwrap();
+            num_blocks += 1;
+            if num_blocks == num {
+                break;
+            }
+        }
+
+        num_blocks = 0;
+    }
+
+    Ok(())
+}
+
+async fn test_stake_table_update(clients: Vec<SequencerClient>) -> Result<()> {
+    /*
+        EPOCH V3
+    */
+
+    let rpc_url = var("ESPRESSO_SEQUENCER_L1_PROVIDER")?;
+    let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?;
+    let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?;
+    let initial_stake_table_path = var("ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")?;
+
+    let permissioned_stake_table =
+        PermissionedStakeTableUpdate::from_toml_file(Path::new(&initial_stake_table_path))?;
+
+    // initial stake table has 5 new stakers
+
+    let new_stakers = permissioned_stake_table.new_stakers;
+    // let's remove one
+    let staker_removed = new_stakers[0].clone();
+
+    let st_with_one_removed =
+        PermissionedStakeTableUpdate::new(vec![staker_removed.clone()], vec![]);
+    let client = clients[0].clone();
+
+    let epoch_before_update = client.current_epoch().await?;
+
+    update_stake_table(
+        rpc_url.parse()?,
+        Duration::from_secs(7),
+        "test test test test test test test test test test test junk".to_string(),
+        account_index.parse()?,
+        contract_address.parse()?,
+        st_with_one_removed,
+    )
+    .await?;
+
+    loop {
+        sleep(Duration::from_secs(10)).await;
+        let epoch = clients[0].current_epoch().await?;
+
+        if epoch > epoch_before_update {
+            let stake_table = client.stake_table(epoch).await?;
+            assert_eq!(stake_table.len(), 4);
+
+            assert!(
+                stake_table
+                    .iter()
+                    .all(|st| st.stake_key != staker_removed.stake_table_key),
+                "Entry for {} is still in the stake table",
+                staker_removed.stake_table_key
+            );
+
+            break;
+        }
+    }
+    // TODO: randomize this test
+
+    Ok(())
 }
diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs
index 40c3811a6a..8275709128 100644
--- a/types/src/v0/mod.rs
+++ b/types/src/v0/mod.rs
@@ -178,8 +178,8 @@ pub type MockSequencerVersions = SequencerVersions<StaticVersion<0, 2>, StaticVe
 pub type V0_0 = StaticVersion<0, 0>;
 pub type V0_1 = StaticVersion<0, 1>;
 pub type FeeVersion = StaticVersion<0, 2>;
+pub type EpochVersion = StaticVersion<0, 3>;
 pub type MarketplaceVersion = StaticVersion<0, 99>;
-pub type EpochVersion = StaticVersion<0, 100>;
 
 pub type Leaf = hotshot_types::data::Leaf<SeqTypes>;
 pub type Leaf2 = hotshot_types::data::Leaf2<SeqTypes>;
diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs
index 94a3eac1d9..b303d070e5 100644
--- a/utils/src/stake_table.rs
+++ b/utils/src/stake_table.rs
@@ -57,8 +57,8 @@ impl From<PermissionedStakeTableConfig> for Vec<NodeInfoJf> {
 }
 
 #[derive(serde::Serialize, serde::Deserialize, Clone, Debug, From, PartialEq)]
-struct StakerIdentity {
-    stake_table_key: BLSPubKey,
+pub struct StakerIdentity {
+    pub stake_table_key: BLSPubKey,
 }
 
 impl From<StakerIdentity> for BLSPubKey {
@@ -72,12 +72,22 @@ impl From<StakerIdentity> for BLSPubKey {
 #[serde(bound(deserialize = ""))]
 pub struct PermissionedStakeTableUpdate {
     #[serde(default)]
-    stakers_to_remove: Vec<StakerIdentity>,
+    pub stakers_to_remove: Vec<StakerIdentity>,
     #[serde(default)]
-    new_stakers: Vec<PeerConfigKeys<BLSPubKey>>,
+    pub new_stakers: Vec<PeerConfigKeys<BLSPubKey>>,
 }
 
 impl PermissionedStakeTableUpdate {
+    pub fn new(
+        new_stakers: Vec<PeerConfigKeys<BLSPubKey>>,
+        stakers_to_remove: Vec<StakerIdentity>,
+    ) -> Self {
+        Self {
+            stakers_to_remove,
+            new_stakers,
+        }
+    }
+
     pub fn from_toml_file(path: &Path) -> anyhow::Result<Self> {
         let config_file_as_string: String = fs::read_to_string(path)
             .unwrap_or_else(|_| panic!("Could not read config file located at {}", path.display()));

From 3885e2148058bf942447c5264e8da9e016c39721 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Fri, 24 Jan 2025 00:41:48 +0500
Subject: [PATCH 011/120] bincode deserialize for migrated hashset

---
 sequencer/src/persistence/fs.rs | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs
index 385a4bf02e..11dd5be65a 100644
--- a/sequencer/src/persistence/fs.rs
+++ b/sequencer/src/persistence/fs.rs
@@ -103,13 +103,9 @@ impl PersistenceOptions for Options {
         let migration_path = path.join("migration");
         let migrated = if migration_path.is_file() {
-            let bytes = fs::read(&path).context(format!(
-                "unable to read leaf migration from {}",
-                path.display()
-            ))?;
-            let json = serde_json::from_slice(&bytes).context("config file is not valid JSON")?;
-
-            serde_json::from_value(json).context("malformed config file")?
+            let bytes = fs::read(&path)
+                .context(format!("unable to read migration from {}", path.display()))?;
+            bincode::deserialize(&bytes).context("malformed migration file")?
} else { HashSet::new() }; From 5e27fc281b23c65487c8ceeee5ff7b19f325aaa0 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 30 Jan 2025 18:22:01 +0500 Subject: [PATCH 012/120] DEMO_GENESIS_FILE env --- justfile | 3 +-- process-compose.yaml | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/justfile b/justfile index 05c8e2881a..b8eb2ad79c 100644 --- a/justfile +++ b/justfile @@ -30,8 +30,7 @@ demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} demo-native-epoch *args: build - # export DEMO_GENESIS_FILE=data/genesis/demo-epoch.toml - scripts/demo-native -f process-compose.yaml {{args}} + DEMO_GENESIS_FILE=data/genesis/demo-epoch.toml scripts/demo-native -f process-compose.yaml {{args}} demo-native-benchmark: cargo build --release --features benchmarking diff --git a/process-compose.yaml b/process-compose.yaml index d2fa812225..d50605c2ee 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -5,8 +5,8 @@ environment: - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT - - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-epoch.toml - - ESPRESSO_BUILDER_GENESIS_FILE=data/genesis/demo-epoch.toml + - ESPRESSO_SEQUENCER_GENESIS_FILE=$DEMO_GENESIS_FILE + - ESPRESSO_BUILDER_GENESIS_FILE=$DEMO_GENESIS_FILE - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/ From a1803f7793baa9160fd3382bd9c773165354ba5e Mon Sep 17 00:00:00 2001 From: tbro Date: Mon, 3 Feb 2025 18:42:52 -0300 Subject: [PATCH 013/120] fix error en `.env` file --- .env | 1 - 1 file changed, 1 deletion(-) diff --git a/.env b/.env index 84d24e9100..d716154cfb 100644 --- a/.env +++ b/.env @@ -152,4 +152,3 @@ INTEGRATION_TEST_SEQUENCER_VERSION=02 # max database connections ESPRESSO_SEQUENCER_DATABASE_MAX_CONNECTIONS=25 - \ No newline at end of file From d688b6c665fd41c3efef0314c178132a7f17469a Mon Sep 17 00:00:00 2001 From: tbro Date: Mon, 3 Feb 2025 19:09:47 -0300 Subject: [PATCH 014/120] add pos test to justfile and CI workflow --- .github/workflows/test.yml | 7 +++++-- justfile | 7 ++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 852dd0b9fe..7ce93d29b2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -182,11 +182,14 @@ jobs: needs: [build-test-bins, build-test-artifacts-postgres] strategy: matrix: - version: [02,99] + version: [02,03,99] include: - version: 02 compose: "-f process-compose.yaml -D" - + - version: 03 + env: + DEMO_GENESIS_FILE: data/genesis/demo-epoch.toml + compose: "-f process-compose.yaml -D" - version: 99 compose: "-f process-compose.yaml -f process-compose-mp.yml -D" runs-on: ubuntu-latest diff --git a/justfile b/justfile index b8eb2ad79c..2005f5693d 100644 --- a/justfile +++ b/justfile @@ -29,7 +29,7 @@ build profile="test": demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} -demo-native-epoch *args: build +demo-native-pos *args: build DEMO_GENESIS_FILE=data/genesis/demo-epoch.toml scripts/demo-native -f process-compose.yaml {{args}} demo-native-benchmark: @@ -74,10 +74,15 @@ test-all: 
test-integration: @echo 'NOTE that demo-native must be running for this test to succeed.' INTEGRATION_TEST_SEQUENCER_VERSION=2 cargo nextest run --all-features --nocapture --profile integration smoke + test-integration-mp: @echo 'NOTE that demo-native-mp must be running for this test to succeed.' INTEGRATION_TEST_SEQUENCER_VERSION=99 cargo nextest run --all-features --nocapture --profile integration +test-integration-pos: + @echo 'NOTE that demo-native-pos must be running for this test to succeed.' + INTEGRATION_TEST_SEQUENCER_VERSION=3 cargo nextest run --all-features --nocapture --profile integration smoke + clippy: @echo 'features: "embedded-db"' cargo clippy --workspace --features embedded-db --all-targets -- -D warnings From 697d02246e2fda5401ad132978a7800e76cdb53f Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 4 Feb 2025 19:12:26 +0500 Subject: [PATCH 015/120] run demo native with base version set to pos --- .github/workflows/test.yml | 2 +- data/genesis/demo-pos-base.toml | 21 +++++++++++++++++++ .../{demo-epoch.toml => demo-pos.toml} | 0 justfile | 5 ++++- sequencer/src/run.rs | 11 +++++++++- 5 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 data/genesis/demo-pos-base.toml rename data/genesis/{demo-epoch.toml => demo-pos.toml} (100%) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7ce93d29b2..c4dda28b0f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -188,7 +188,7 @@ jobs: compose: "-f process-compose.yaml -D" - version: 03 env: - DEMO_GENESIS_FILE: data/genesis/demo-epoch.toml + DEMO_GENESIS_FILE: data/genesis/demo-pos.toml compose: "-f process-compose.yaml -D" - version: 99 compose: "-f process-compose.yaml -f process-compose-mp.yml -D" diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml new file mode 100644 index 0000000000..38fb3deff5 --- /dev/null +++ b/data/genesis/demo-pos-base.toml @@ -0,0 +1,21 @@ +base_version = "0.3" +upgrade_version = "0.0" + +[stake_table] +capacity = 10 + +[chain_config] +chain_id = 999999999 +max_block_size = '1mb' +base_fee = '1 wei' +fee_recipient = "0x0000000000000000000000000000000000000000" +bid_recipient = "0x0000000000000000000000000000000000000000" +fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" +stake_table_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" + +[header] +timestamp = "1970-01-01T00:00:00Z" + +[l1_finalized] +number = 0 + \ No newline at end of file diff --git a/data/genesis/demo-epoch.toml b/data/genesis/demo-pos.toml similarity index 100% rename from data/genesis/demo-epoch.toml rename to data/genesis/demo-pos.toml diff --git a/justfile b/justfile index 2005f5693d..1877c7eca3 100644 --- a/justfile +++ b/justfile @@ -30,7 +30,10 @@ demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} demo-native-pos *args: build - DEMO_GENESIS_FILE=data/genesis/demo-epoch.toml scripts/demo-native -f process-compose.yaml {{args}} + DEMO_GENESIS_FILE=data/genesis/demo-pos.toml scripts/demo-native -f process-compose.yaml {{args}} + +demo-native-pos-base *args: build + DEMO_GENESIS_FILE=data/genesis/demo-pos-base.toml scripts/demo-native -f process-compose.yaml {{args}} demo-native-benchmark: cargo build --release --features benchmarking diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index d1cf4e3425..7eee2e2896 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -10,7 +10,7 @@ use super::{ use clap::Parser; use espresso_types::{ 
traits::NullEventConsumer, EpochVersion, FeeVersion, MarketplaceVersion, SequencerVersions,
-    SolverAuctionResultsProvider,
+    SolverAuctionResultsProvider, V0_0,
 };
 use futures::future::FutureExt;
 use hotshot::MarketplaceConfig;
@@ -47,6 +47,15 @@ pub async fn main() -> anyhow::Result<()> {
             )
             .await
         }
+        (EpochVersion::VERSION, _) => {
+            run(
+                genesis,
+                modules,
+                opt,
+                SequencerVersions::<EpochVersion, V0_0>::new(),
+            )
+            .await
+        }
         (FeeVersion::VERSION, MarketplaceVersion::VERSION) => {
             run(
                 genesis,

From 7786cfb2394604270fc78f4ebf26a5659e74bb68 Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 4 Feb 2025 11:48:52 -0300
Subject: [PATCH 016/120] Marketplace signatures should engage @ v99

---
 types/src/v0/impls/state.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs
index 88d866c54c..a9a3ebc7e6 100644
--- a/types/src/v0/impls/state.rs
+++ b/types/src/v0/impls/state.rs
@@ -667,7 +667,7 @@ fn validate_builder_fee(
     // TODO Marketplace signatures are placeholders for now. In
     // finished Marketplace signatures will cover the full
     // transaction.
-    if version.minor >= 3 {
+    if version.minor >= 99 {
         fee_info
             .account()
             .validate_sequencing_fee_signature_marketplace(

From 5f1a2b8074143a8b0a4289cd8bfa31749af68ef4 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Tue, 4 Feb 2025 21:57:36 +0500
Subject: [PATCH 017/120] set epoch-height to 150

---
 builder/src/lib.rs                              | 2 +-
 hotshot-query-service/examples/simple-server.rs | 2 +-
 hotshot-query-service/src/testing/consensus.rs  | 2 +-
 sequencer/src/lib.rs                            | 5 ++++-
 types/src/v0/traits.rs                          | 3 ++-
 5 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/builder/src/lib.rs b/builder/src/lib.rs
index a8805a7da6..0cb5a108f0 100755
--- a/builder/src/lib.rs
+++ b/builder/src/lib.rs
@@ -138,7 +138,7 @@ pub mod testing {
             start_voting_time: 0,
             stop_proposing_time: 0,
             stop_voting_time: 0,
-            epoch_height: 0,
+            epoch_height: 150,
         };
 
         Self {
diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs
index d22c341858..aeb907d9c7 100644
--- a/hotshot-query-service/examples/simple-server.rs
+++ b/hotshot-query-service/examples/simple-server.rs
@@ -216,7 +216,7 @@ async fn init_consensus(
         stop_proposing_time: 0,
         start_voting_time: 0,
         stop_voting_time: 0,
-        epoch_height: 0,
+        epoch_height: 150,
     };
 
     let nodes = join_all(priv_keys.into_iter().zip(data_sources).enumerate().map(
diff --git a/hotshot-query-service/src/testing/consensus.rs b/hotshot-query-service/src/testing/consensus.rs
index e8b94af9ef..06d7565fa3 100644
--- a/hotshot-query-service/src/testing/consensus.rs
+++ b/hotshot-query-service/src/testing/consensus.rs
@@ -143,7 +143,7 @@ impl MockNetwork {
             stop_proposing_time: 0,
             start_voting_time: 0,
             stop_voting_time: 0,
-            epoch_height: 0,
+            epoch_height: 150,
         };
         update_config(&mut config);
diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs
index 4844fda77c..ef34353217 100644
--- a/sequencer/src/lib.rs
+++ b/sequencer/src/lib.rs
@@ -361,6 +361,9 @@ pub async fn init_node(
         upgrade.set_hotshot_config_parameters(&mut network_config.config);
     }
 
+    //todo(abdul): get from genesis file
+    network_config.config.epoch_height = 150;
+
     // If the `Libp2p` bootstrap nodes were supplied via the command line, override those
     // present in the config file.
if let Some(bootstrap_nodes) = network_params.libp2p_bootstrap_nodes { @@ -806,7 +809,7 @@ pub mod testing { start_voting_time: 0, stop_proposing_time: 0, stop_voting_time: 0, - epoch_height: 0, + epoch_height: 150, }; Self { diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 7f352fdbd5..66e287e01f 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -582,7 +582,8 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Ok(( HotShotInitializer { instance_state: state, - epoch_height: 0, + // todo(abdul): load from storage? + epoch_height: 150, anchor_leaf: leaf, anchor_state: validated_state.unwrap_or_default(), anchor_state_delta: None, From cac2b3b07a81dae816c17b3557b69914a2cff41a Mon Sep 17 00:00:00 2001 From: tbro Date: Tue, 4 Feb 2025 16:45:31 -0300 Subject: [PATCH 018/120] process-compose: remove fund-builder condition from `permissionless-builder` --- .github/workflows/test.yml | 2 +- process-compose.yaml | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c4dda28b0f..645d051217 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -226,7 +226,7 @@ jobs: NEXTEST_PROFILE: integration INTEGRATION_TEST_SEQUENCER_VERSION: ${{ matrix.version }} run: | - cargo nextest run --archive-file nextest-archive-postgres.tar.zst --verbose --no-fail-fast --nocapture \ + cargo nextest run --archive-file nextest-archive-postgres.tar.zst --verbose --no-fail-fast \ --workspace-remap $PWD $(if [ "${{ matrix.version }}" == "2" ]; then echo " smoke"; fi) timeout-minutes: 10 diff --git a/process-compose.yaml b/process-compose.yaml index d50605c2ee..558f3d918e 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -522,15 +522,13 @@ processes: depends_on: sequencer0: condition: process_healthy - fund-builder: - condition: process_completed readiness_probe: http_get: scheme: http host: localhost port: $ESPRESSO_BUILDER_SERVER_PORT path: /healthcheck - failure_threshold: 100 + failure_threshold: 5 period_seconds: 1 availability: restart: "exit_on_failure" From 4112773773e675ad69f60fd6a06015068fa224f5 Mon Sep 17 00:00:00 2001 From: tbro Date: Tue, 4 Feb 2025 18:31:20 -0300 Subject: [PATCH 019/120] Fix path to default genesis path It appears the CLI default has drifted from real path. Also the new DEMO_GENESIS_FILE var breaks v2 demo native. So removing it for now. 
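A cheap guard against this class of drift, sketched here for discussion only (not part of
this patch; the path and test name are illustrative): fail the build when the clap default
no longer points at a file that exists in the repo.

    /// Guard that the default genesis path still resolves to a checked-in file,
    /// so demos and CI fail loudly if the file is moved or renamed again.
    #[test]
    fn default_genesis_file_exists() {
        let default = std::path::Path::new("data/genesis/demo.toml");
        assert!(
            default.exists(),
            "default genesis file moved; update the default in sequencer/src/options.rs"
        );
    }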
---
 .env                     | 2 +-
 process-compose.yaml     | 6 ++++--
 sequencer/src/options.rs | 2 +-
 3 files changed, 6 insertions(+), 4 deletions(-)
 mode change 100644 => 100755 sequencer/src/options.rs

diff --git a/.env b/.env
index d716154cfb..2beaa09a17 100644
--- a/.env
+++ b/.env
@@ -33,7 +33,7 @@ ESPRESSO_SEQUENCER4_API_PORT=24004
 ESPRESSO_SEQUENCER_URL=http://sequencer0:${ESPRESSO_SEQUENCER_API_PORT}
 ESPRESSO_SEQUENCER_MAX_CONNECTIONS=25
 ESPRESSO_SEQUENCER_STORAGE_PATH=/store/sequencer
-ESPRESSO_SEQUENCER_GENESIS_FILE=/genesis/demo.toml
+ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo.toml
 ESPRESSO_SEQUENCER_L1_PORT=8545
 ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL=100ms
 ESPRESSO_SEQUENCER_L1_WS_PORT=8546
diff --git a/process-compose.yaml b/process-compose.yaml
index 558f3d918e..450e9fd810 100644
--- a/process-compose.yaml
+++ b/process-compose.yaml
@@ -5,8 +5,8 @@ environment:
   - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT
   - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT
   - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT
-  - ESPRESSO_SEQUENCER_GENESIS_FILE=$DEMO_GENESIS_FILE
-  - ESPRESSO_BUILDER_GENESIS_FILE=$DEMO_GENESIS_FILE
+  # - ESPRESSO_SEQUENCER_GENESIS_FILE=$DEMO_GENESIS_FILE
+  # - ESPRESSO_BUILDER_GENESIS_FILE=$DEMO_GENESIS_FILE
   - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml
   - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT
   - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/
@@ -522,6 +522,8 @@ processes:
     depends_on:
       sequencer0:
         condition: process_healthy
+      fund-builder:
+        condition: process_completed
     readiness_probe:
       http_get:
         scheme: http
diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs
old mode 100644
new mode 100755
index 05dbf4b39b..c818ddd401
--- a/sequencer/src/options.rs
+++ b/sequencer/src/options.rs
@@ -284,7 +284,7 @@ pub struct Options {
         long,
         name = "GENESIS_FILE",
         env = "ESPRESSO_SEQUENCER_GENESIS_FILE",
-        default_value = "/genesis/demo.toml"
+        default_value = "/data/genesis/demo.toml"
     )]
     pub genesis_file: PathBuf,

From 5802184511f0e0e497c9332cc9ba559dce385077 Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 4 Feb 2025 20:57:25 -0300
Subject: [PATCH 020/120] Some fixes

Use the standard env var to set the genesis file and set upgrade version to
0.3. Also make some changes to chain config to make it easier to notice
if we are supplying the correct one or not.
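One way to make a mis-supplied genesis obvious, per the intent above — sketch only, parsing
the TOML generically so nothing is assumed beyond the `toml` and `tracing` crates (the
function name and call site are illustrative, not part of this patch):

    /// Log the fields tweaked in demo-pos-base.toml (versions, block size, fee)
    /// so the wrong genesis file stands out in the logs immediately.
    fn log_genesis_summary(path: &std::path::Path) -> anyhow::Result<()> {
        let genesis: toml::Value = toml::from_str(&std::fs::read_to_string(path)?)?;
        let chain = &genesis["chain_config"];
        tracing::info!(
            base_version = %genesis["base_version"],
            upgrade_version = %genesis["upgrade_version"],
            max_block_size = %chain["max_block_size"],
            base_fee = %chain["base_fee"],
            "loaded genesis"
        );
        Ok(())
    }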
---
 data/genesis/demo-pos-base.toml | 8 ++++----
 justfile                        | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml
index 38fb3deff5..4375fff290 100644
--- a/data/genesis/demo-pos-base.toml
+++ b/data/genesis/demo-pos-base.toml
@@ -1,15 +1,15 @@
 base_version = "0.3"
-upgrade_version = "0.0"
+upgrade_version = "0.3"
 
 [stake_table]
 capacity = 10
 
 [chain_config]
 chain_id = 999999999
-max_block_size = '1mb'
-base_fee = '1 wei'
+max_block_size = '2mb'
+base_fee = '3 wei'
 fee_recipient = "0x0000000000000000000000000000000000000000"
-bid_recipient = "0x0000000000000000000000000000000000000000"
+# bid_recipient = "0x0000000000000000000000000000000000000000"
 fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468"
 stake_table_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af"
 
diff --git a/justfile b/justfile
index 1877c7eca3..1e0054356f 100644
--- a/justfile
+++ b/justfile
@@ -30,10 +30,10 @@ demo-native-mp *args: build
     scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}}
 
 demo-native-pos *args: build
-    DEMO_GENESIS_FILE=data/genesis/demo-pos.toml scripts/demo-native -f process-compose.yaml {{args}}
+    ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos.toml scripts/demo-native -f process-compose.yaml {{args}}
 
 demo-native-pos-base *args: build
-    DEMO_GENESIS_FILE=data/genesis/demo-pos-base.toml scripts/demo-native -f process-compose.yaml {{args}}
+    ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos-base.toml scripts/demo-native -f process-compose.yaml {{args}}

From 8ed730f54a39615a18ea9ef733f5df1fffeae033 Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 4 Feb 2025 20:59:24 -0300
Subject: [PATCH 021/120] temporarily disable versions we don't need for the
 current objective

---
 sequencer/src/run.rs | 19 +------------------
 1 file changed, 1 insertion(+), 18 deletions(-)

diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs
index 7eee2e2896..7bdc0428f9 100644
--- a/sequencer/src/run.rs
+++ b/sequencer/src/run.rs
@@ -38,15 +38,6 @@ pub async fn main() -> anyhow::Result<()> {
     let upgrade = genesis.upgrade_version;
 
     match (base, upgrade) {
-        (FeeVersion::VERSION, EpochVersion::VERSION) => {
-            run(
-                genesis,
-                modules,
-                opt,
-                SequencerVersions::<FeeVersion, EpochVersion>::new(),
-            )
-            .await
-        }
         (EpochVersion::VERSION, _) => {
             run(
                 genesis,
@@ -56,15 +47,7 @@ pub async fn main() -> anyhow::Result<()> {
             )
             .await
         }
-        (FeeVersion::VERSION, MarketplaceVersion::VERSION) => {
-            run(
-                genesis,
-                modules,
-                opt,
-                SequencerVersions::<FeeVersion, MarketplaceVersion>::new(),
-            )
-            .await
-        }
+
         _ => panic!(
             "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file."
         ),
     }
 }

From 6f65adf76c0f7862a81e49798143d368b38e2058 Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 4 Feb 2025 21:00:47 -0300
Subject: [PATCH 022/120] only run v 3 in CI for now

---
 .github/workflows/test.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 645d051217..c1af831d7c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -182,13 +182,13 @@ jobs:
     needs: [build-test-bins, build-test-artifacts-postgres]
     strategy:
       matrix:
-        version: [02,03,99]
+        version: [03]
         include:
           - version: 02
             compose: "-f process-compose.yaml -D"
           - version: 03
             env:
              ESPRESSO_SEQUENCER_GENESIS_FILE: data/genesis/demo-pos-base.toml
             compose: "-f process-compose.yaml -D"
           - version: 99
             compose: "-f process-compose.yaml -f process-compose-mp.yml -D"

From 213baf0ed4b6cbe5d95d046a41032cf0604032bb Mon Sep 17 00:00:00 2001
From: tbro
Date: Tue, 4 Feb 2025 21:01:09 -0300
Subject: [PATCH 023/120] avoid redirect in header stream

---
 client/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/client/src/lib.rs b/client/src/lib.rs
index 572d2e796e..aee2500074 100644
--- a/client/src/lib.rs
+++ b/client/src/lib.rs
@@ -52,7 +52,7 @@ impl SequencerClient {
         height: u64,
     ) -> anyhow::Result<BoxStream<'static, anyhow::Result<Header>>> {
         self.0
-            .socket(&format!("availability/stream/headers/{height}"))
+            .socket(&format!("v0/availability/stream/headers/{height}"))
             .subscribe::<Header>()
             .await
             .context("subscribing to Espresso headers")

From 37307e47d4ea8a00de8311963a115bd15f77f3ff Mon Sep 17 00:00:00 2001
From: tbro
Date: Wed, 5 Feb 2025 11:32:39 -0300
Subject: [PATCH 024/120] Use named version (instead of integer)

---
 types/src/v0/impls/state.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs
index a9a3ebc7e6..ebee2cf346 100644
--- a/types/src/v0/impls/state.rs
+++ b/types/src/v0/impls/state.rs
@@ -23,11 +23,11 @@ use serde::{Deserialize, Serialize};
 use std::ops::Add;
 use thiserror::Error;
 use time::OffsetDateTime;
-use vbs::version::Version;
+use vbs::version::{StaticVersionType, Version};
 
 use super::{
     auction::ExecutionError, fee_info::FeeError, instance_state::NodeState, BlockMerkleCommitment,
-    BlockSize, FeeMerkleCommitment, L1Client,
+    BlockSize, FeeMerkleCommitment, L1Client, MarketplaceVersion,
 };
 use crate::{
     traits::StateCatchup,
@@ -667,7 +667,7 @@ fn validate_builder_fee(
     // TODO Marketplace signatures are placeholders for now. In
     // finished Marketplace signatures will cover the full
     // transaction.
-    if version.minor >= 99 {
+    if version.minor >= MarketplaceVersion::MINOR {
         fee_info
             .account()
             .validate_sequencing_fee_signature_marketplace(

From 3ae3f660080704a1b492ede8a540d888ec5f3ed5 Mon Sep 17 00:00:00 2001
From: tbro
Date: Wed, 5 Feb 2025 14:04:41 -0300
Subject: [PATCH 025/120] Add missing header type + conversions

---
 types/src/v0/impls/header.rs       |  4 +-
 types/src/v0/v0_3/chain_config.rs  | 16 ++++----
 types/src/v0/v0_3/header.rs        | 61 ++++++++++++++++++++++++++++
 types/src/v0/v0_3/mod.rs           | 17 +++++---
 types/src/v0/v0_99/chain_config.rs | 53 ++++++++++++++++++++++----
 5 files changed, 126 insertions(+), 25 deletions(-)
 create mode 100644 types/src/v0/v0_3/header.rs

diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs
index bb13ec8ab4..99189198f0 100644
--- a/types/src/v0/impls/header.rs
+++ b/types/src/v0/impls/header.rs
@@ -329,7 +329,7 @@ impl Header {
                 builder_signature: builder_signature.first().copied(),
             }),
             3 => Self::V3(v0_3::Header {
-                chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from(
+                chain_config: v0_3::ResolvableChainConfig::from(v0_3::ChainConfig::from(
                     chain_config,
                 )),
                 height,
@@ -550,7 +550,7 @@ impl Header {
                 builder_signature: builder_signature.first().copied(),
             }),
             3 => Self::V3(v0_3::Header {
-                chain_config: v0_1::ResolvableChainConfig::from(v0_1::ChainConfig::from(
+                chain_config: v0_3::ResolvableChainConfig::from(v0_3::ChainConfig::from(
                     chain_config,
                 )),
                 height,
diff --git a/types/src/v0/v0_3/chain_config.rs b/types/src/v0/v0_3/chain_config.rs
index dbc73cb589..f82521377c 100644
--- a/types/src/v0/v0_3/chain_config.rs
+++ b/types/src/v0/v0_3/chain_config.rs
@@ -1,4 +1,4 @@
-use crate::{v0_1, BlockSize, ChainId, FeeAccount, FeeAmount};
+use crate::{v0_1, v0_99, BlockSize, ChainId, FeeAccount, FeeAmount};
 use committable::{Commitment, Committable};
 use ethers::types::{Address, U256};
 use itertools::Either;
@@ -74,13 +74,13 @@ impl Committable for ChainConfig {
 }
 
 impl ResolvableChainConfig {
-    pub fn _commit(&self) -> Commitment<ChainConfig> {
+    pub fn commit(&self) -> Commitment<ChainConfig> {
         match self.chain_config {
             Either::Left(config) => config.commit(),
             Either::Right(commitment) => commitment,
         }
     }
-    pub fn _resolve(self) -> Option<ChainConfig> {
+    pub fn resolve(self) -> Option<ChainConfig> {
         match self.chain_config {
             Either::Left(config) => Some(config),
             Either::Right(_) => None,
@@ -141,23 +141,25 @@ impl From<v0_1::ChainConfig> for ChainConfig {
     }
 }
 
-impl From<ChainConfig> for v0_1::ChainConfig {
-    fn from(chain_config: ChainConfig) -> v0_1::ChainConfig {
-        let ChainConfig {
+impl From<v0_99::ChainConfig> for ChainConfig {
+    fn from(chain_config: v0_99::ChainConfig) -> ChainConfig {
+        let v0_99::ChainConfig {
             chain_id,
             max_block_size,
             base_fee,
             fee_contract,
             fee_recipient,
+            stake_table_contract,
             ..
         } = chain_config;
 
-        v0_1::ChainConfig {
+        ChainConfig {
             chain_id,
             max_block_size,
             base_fee,
             fee_contract,
             fee_recipient,
+            stake_table_contract,
         }
     }
 }
diff --git a/types/src/v0/v0_3/header.rs b/types/src/v0/v0_3/header.rs
new file mode 100644
index 0000000000..c4dd120916
--- /dev/null
+++ b/types/src/v0/v0_3/header.rs
@@ -0,0 +1,61 @@
+use crate::NsTable;
+
+use super::{
+    BlockMerkleCommitment, BuilderSignature, FeeInfo, FeeMerkleCommitment, L1BlockInfo,
+    ResolvableChainConfig,
+};
+use ark_serialize::CanonicalSerialize;
+use committable::{Commitment, Committable, RawCommitmentBuilder};
+use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment};
+use serde::{Deserialize, Serialize};
+
+/// A header is like a [`Block`] with the body replaced by a digest.
+#[derive(Clone, Debug, Deserialize, Serialize, Hash, PartialEq, Eq)]
+pub struct Header {
+    /// A commitment to a ChainConfig or a full ChainConfig.
+    pub(crate) chain_config: ResolvableChainConfig,
+    pub(crate) height: u64,
+    pub(crate) timestamp: u64,
+    pub(crate) l1_head: u64,
+    pub(crate) l1_finalized: Option<L1BlockInfo>,
+    pub(crate) payload_commitment: VidCommitment,
+    pub(crate) builder_commitment: BuilderCommitment,
+    pub(crate) ns_table: NsTable,
+    pub(crate) block_merkle_tree_root: BlockMerkleCommitment,
+    pub(crate) fee_merkle_tree_root: FeeMerkleCommitment,
+    pub(crate) fee_info: FeeInfo,
+    pub(crate) builder_signature: Option<BuilderSignature>,
+}
+
+impl Committable for Header {
+    fn commit(&self) -> Commitment<Self> {
+        let mut bmt_bytes = vec![];
+        self.block_merkle_tree_root
+            .serialize_with_mode(&mut bmt_bytes, ark_serialize::Compress::Yes)
+            .unwrap();
+        let mut fmt_bytes = vec![];
+        self.fee_merkle_tree_root
+            .serialize_with_mode(&mut fmt_bytes, ark_serialize::Compress::Yes)
+            .unwrap();
+
+        RawCommitmentBuilder::new(&Self::tag())
+            .field("chain_config", self.chain_config.commit())
+            .u64_field("height", self.height)
+            .u64_field("timestamp", self.timestamp)
+            .u64_field("l1_head", self.l1_head)
+            .optional("l1_finalized", &self.l1_finalized)
+            .constant_str("payload_commitment")
+            .fixed_size_bytes(self.payload_commitment.as_ref().as_ref())
+            .constant_str("builder_commitment")
+            .fixed_size_bytes(self.builder_commitment.as_ref())
+            .field("ns_table", self.ns_table.commit())
+            .var_size_field("block_merkle_tree_root", &bmt_bytes)
+            .var_size_field("fee_merkle_tree_root", &fmt_bytes)
+            .field("fee_info", self.fee_info.commit())
+            .finalize()
+    }
+
+    fn tag() -> String {
+        crate::v0_1::Header::tag()
+    }
+}
diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs
index 23c8e3a021..9c7789d1e9 100644
--- a/types/src/v0/v0_3/mod.rs
+++ b/types/src/v0/v0_3/mod.rs
@@ -4,14 +4,13 @@ use vbs::version::Version;
 pub use super::v0_1::{
     AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature, ChainId,
     Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo, FeeMerkleCommitment, FeeMerkleProof,
-    FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot,
-    NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned,
-    NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs,
-    NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, ResolvableChainConfig, TimeBasedUpgrade,
-    Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof, TxTableEntries,
-    TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade,
-    BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN,
-    NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
+    FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId,
+    NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange,
+    NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange,
+    NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter,
+    TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
+    UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT,
+    NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
 };
 pub(crate) use super::v0_1::{
     L1ClientMetrics, L1Event, L1Provider, L1State, L1UpdateTask, MultiRpcClient,
@@ -21,7 +20,9 @@ pub(crate) use super::v0_1::{
 pub const VERSION: Version = Version { major: 0, minor: 3 };
 
 mod chain_config;
+mod header;
 mod stake_table;
 
 pub use chain_config::*;
+pub use header::Header;
 pub use stake_table::*;
diff --git a/types/src/v0/v0_99/chain_config.rs b/types/src/v0/v0_99/chain_config.rs
index 420580d7af..cd9fd7cba5 100644
--- a/types/src/v0/v0_99/chain_config.rs
+++ b/types/src/v0/v0_99/chain_config.rs
@@ -130,6 +130,21 @@
     }
 }
 
+impl From<&v0_3::ResolvableChainConfig> for ResolvableChainConfig {
+    fn from(
+        &v0_3::ResolvableChainConfig { chain_config }: &v0_3::ResolvableChainConfig,
+    ) -> ResolvableChainConfig {
+        match chain_config {
+            Either::Left(chain_config) => ResolvableChainConfig {
+                chain_config: Either::Left(ChainConfig::from(chain_config)),
+            },
+            Either::Right(c) => ResolvableChainConfig {
+                chain_config: Either::Right(Commitment::from_raw(*c.as_ref())),
+            },
+        }
+    }
+}
+
 impl From<v0_1::ChainConfig> for ChainConfig {
     fn from(chain_config: v0_1::ChainConfig) -> ChainConfig {
         let v0_1::ChainConfig {
@@ -217,22 +232,44 @@ mod test {
     use super::*;
 
     #[test]
-    fn test_upgrade_chain_config_v3_resolvable_chain_config_from_v1() {
+    fn test_upgrade_chain_config_v99_resolvable_chain_config_from_v1() {
         let expectation: ResolvableChainConfig = ChainConfig::default().into();
         let v1_resolvable: v0_1::ResolvableChainConfig = v0_1::ChainConfig::default().into();
-        let v3_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable);
-        assert_eq!(expectation, v3_resolvable);
+        let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable);
+        assert_eq!(expectation, v99_resolvable);
         let expectation: ResolvableChainConfig = ChainConfig::default().commit().into();
         let v1_resolvable: v0_1::ResolvableChainConfig =
             v0_1::ChainConfig::default().commit().into();
-        let v3_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable);
-        assert_eq!(expectation, v3_resolvable);
+        let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v1_resolvable);
+        assert_eq!(expectation, v99_resolvable);
     }
+
     #[test]
-    fn test_upgrade_chain_config_v1_chain_config_from_v3() {
+    fn test_upgrade_chain_config_v99_resolvable_chain_config_from_v3() {
+        let expectation: ResolvableChainConfig = ChainConfig::default().into();
+        let v3_resolvable: v0_3::ResolvableChainConfig =
v0_3::ChainConfig::default().into(); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v3_resolvable); + assert_eq!(expectation, v99_resolvable); + let expectation: ResolvableChainConfig = ChainConfig::default().commit().into(); + let v3_resolvable: v0_3::ResolvableChainConfig = + v0_3::ChainConfig::default().commit().into(); + let v99_resolvable: ResolvableChainConfig = ResolvableChainConfig::from(&v3_resolvable); + assert_eq!(expectation, v99_resolvable); + } + + #[test] + fn test_upgrade_chain_config_v1_chain_config_from_v99() { let expectation = v0_1::ChainConfig::default(); - let v3_chain_config = ChainConfig::default(); - let v1_chain_config = v0_1::ChainConfig::from(v3_chain_config); + let v99_chain_config = ChainConfig::default(); + let v1_chain_config = v0_1::ChainConfig::from(v99_chain_config); assert_eq!(expectation, v1_chain_config); } + + #[test] + fn test_upgrade_chain_config_v3_chain_config_from_v99() { + let expectation = v0_3::ChainConfig::default(); + let v99_chain_config = ChainConfig::default(); + let v3_chain_config = v0_3::ChainConfig::from(v99_chain_config); + assert_eq!(expectation, v3_chain_config); + } } From 7c6222b441563c565036a7bfb53696d4993012bc Mon Sep 17 00:00:00 2001 From: tbro Date: Wed, 5 Feb 2025 14:19:33 -0300 Subject: [PATCH 026/120] bump todo_by by 1 month (#2534) The date has passed again leading to warnings/errors during compilation. --- sequencer/src/network/cdn.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencer/src/network/cdn.rs b/sequencer/src/network/cdn.rs index cc524b026a..adce13a758 100644 --- a/sequencer/src/network/cdn.rs +++ b/sequencer/src/network/cdn.rs @@ -80,7 +80,7 @@ impl SignatureScheme for WrappedSignatureKey { }; todo_by!( - "2025-2-4", + "2025-3-4", "Only accept the namespaced message once everyone has upgraded" ); public_key.0.validate(&signature, message) @@ -112,7 +112,7 @@ impl RunDef for ProductionDef { } todo_by!( - "2025-2-4", + "2025-3-4", "Remove this, switching to TCP+TLS singularly when everyone has updated" ); /// The user definition for the Push CDN. 
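For context on these recurring bumps: `todo_by!` comes from the todo-by crate and acts as a
compile-time deadline — once the given date passes, the crate fails to build until the marker
is resolved or pushed forward, which is exactly what this commit does. Minimal usage, as
assumed here (the message text is illustrative):

    use todo_by::todo_by;

    // Compilation fails after the date below, so the TODO cannot silently go stale.
    todo_by!("2025-3-4", "Remove the legacy non-namespaced message path");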
From a65cbb82e28f89fe00b328d245661821b1469082 Mon Sep 17 00:00:00 2001 From: tbro Date: Wed, 5 Feb 2025 14:39:43 -0300 Subject: [PATCH 027/120] Put expected fee back as it was --- data/genesis/demo-pos-base.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml index 4375fff290..b7a5994ced 100644 --- a/data/genesis/demo-pos-base.toml +++ b/data/genesis/demo-pos-base.toml @@ -6,8 +6,8 @@ capacity = 10 [chain_config] chain_id = 999999999 -max_block_size = '2mb' -base_fee = '3 wei' +max_block_size = '1mb' +base_fee = '1 wei' fee_recipient = "0x0000000000000000000000000000000000000000" # bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" From 9613ce1b242c2436bfbb21b0b2d8f359483f7267 Mon Sep 17 00:00:00 2001 From: tbro Date: Wed, 5 Feb 2025 17:58:49 -0300 Subject: [PATCH 028/120] Add stake tables up to epoch 2 (inclusive) --- types/src/v0/impls/stake_table.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 9b62a09515..81acfcb0ad 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -240,10 +240,11 @@ impl EpochCommittees { indexed_da_members, }; + // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); - map.insert(Epoch::genesis(), members.clone()); - // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 - map.insert(Epoch::genesis() + 1u64, members.clone()); + for epoch in Epoch::genesis().u64()..=2 { + map.insert(Epoch::new(epoch), members.clone()); + } Self { non_epoch_committee: members, From cbb414c229918d127cfbdd17266a58114cf68905 Mon Sep 17 00:00:00 2001 From: tbro Date: Wed, 5 Feb 2025 18:14:45 -0300 Subject: [PATCH 029/120] add stake table for ..=10 --- types/src/v0/impls/stake_table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 81acfcb0ad..802e59af9c 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -242,7 +242,7 @@ impl EpochCommittees { // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); - for epoch in Epoch::genesis().u64()..=2 { + for epoch in Epoch::genesis().u64()..=10 { map.insert(Epoch::new(epoch), members.clone()); } From 0152568a21c2a723c5a614459ffe2b0b664d2e17 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 6 Feb 2025 18:17:19 +0500 Subject: [PATCH 030/120] query service leaf2 migration --- .../postgres/V500__leaf2_migration.sql | 15 ++ .../sqlite/V300__leaf2_migration.sql | 15 ++ hotshot-query-service/src/availability.rs | 10 +- .../src/availability/query_data.rs | 28 +- hotshot-query-service/src/data_source.rs | 28 +- .../src/data_source/storage/sql.rs | 254 +++++++++++++++++- .../src/data_source/storage/sql/queries.rs | 17 +- .../storage/sql/queries/availability.rs | 6 +- .../data_source/storage/sql/queries/node.rs | 2 +- .../data_source/storage/sql/transaction.rs | 2 +- .../src/data_source/update.rs | 47 +--- hotshot-query-service/src/lib.rs | 10 +- .../v0/create_node_validator_api.rs | 13 +- node-metrics/src/api/node_validator/v0/mod.rs | 22 +- node-metrics/src/service/client_state/mod.rs | 9 +- node-metrics/src/service/data_state/mod.rs | 15 +- sequencer/src/api.rs | 13 +- 
sequencer/src/api/sql.rs | 2 +- sequencer/src/persistence.rs | 23 +- sequencer/src/persistence/fs.rs | 21 +- sequencer/src/persistence/no_storage.rs | 23 +- sequencer/src/persistence/sql.rs | 33 ++- types/src/v0/traits.rs | 24 +- types/src/v0/utils.rs | 32 +-- 24 files changed, 458 insertions(+), 206 deletions(-) create mode 100644 hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql create mode 100644 hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql diff --git a/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql b/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql new file mode 100644 index 0000000000..69f562df7b --- /dev/null +++ b/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql @@ -0,0 +1,15 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE leaf_migration ( + id SERIAL PRIMARY KEY, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO leaf_migration ("completed") VALUES (false); \ No newline at end of file diff --git a/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql b/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql new file mode 100644 index 0000000000..ce22030d5b --- /dev/null +++ b/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql @@ -0,0 +1,15 @@ +CREATE TABLE leaf2 +( + height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, + hash VARCHAR NOT NULL UNIQUE, + block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, + leaf JSONB NOT NULL, + qc JSONB NOT NULL +); + +CREATE TABLE leaf_migration ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + completed bool NOT NULL DEFAULT false +); + +INSERT INTO leaf_migration ("completed") VALUES (false); \ No newline at end of file diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index 948422fabd..03cb36f744 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -498,6 +498,7 @@ mod test { use super::*; use crate::data_source::storage::AvailabilityStorage; use crate::data_source::VersionedDataSource; + use crate::testing::mocks::MockVersions; use crate::{ data_source::ExtensibleDataSource, status::StatusDataSource, @@ -513,7 +514,8 @@ mod test { use async_lock::RwLock; use committable::Committable; use futures::future::FutureExt; - use hotshot_types::{data::Leaf, simple_certificate::QuorumCertificate}; + use hotshot_types::data::Leaf2; + use hotshot_types::simple_certificate::QuorumCertificate2; use portpicker::pick_unused_port; use serde::de::DeserializeOwned; use std::{fmt::Debug, time::Duration}; @@ -883,9 +885,11 @@ mod test { ); // mock up some consensus data. 
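// (Note: the genesis `Leaf2`/`QuorumCertificate2` pair mocked below is what
// keeps `LeafQueryData::new` happy; as the query_data.rs hunk further down
// shows, it rejects any QC whose `leaf_commit` does not match the leaf's own
// commitment.)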
- let leaf = Leaf::::genesis(&Default::default(), &Default::default()).await; + let leaf = + Leaf2::::genesis::(&Default::default(), &Default::default()) + .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index b559efa030..848d72f8ea 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -13,8 +13,8 @@ use crate::{types::HeightIndexed, Header, Metadata, Payload, Transaction, VidCommon, VidShare}; use committable::{Commitment, Committable}; use hotshot_types::{ - data::Leaf, - simple_certificate::QuorumCertificate, + data::{Leaf, Leaf2}, + simple_certificate::QuorumCertificate2, traits::{ self, block_contents::{BlockHeader, GENESIS_VID_NUM_STORAGE_NODES}, @@ -28,8 +28,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use snafu::{ensure, Snafu}; use std::fmt::Debug; -pub type LeafHash = Commitment>; -pub type QcHash = Commitment>; +pub type LeafHash = Commitment>; +pub type QcHash = Commitment>; /// A block hash is the hash of the block header. /// @@ -192,8 +192,8 @@ pub trait QueryablePayload: traits::BlockPayload { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(bound = "")] pub struct LeafQueryData { - pub(crate) leaf: Leaf, - pub(crate) qc: QuorumCertificate, + pub(crate) leaf: Leaf2, + pub(crate) qc: QuorumCertificate2, } #[derive(Clone, Debug, Snafu)] @@ -212,13 +212,13 @@ impl LeafQueryData { /// /// Fails with an [`InconsistentLeafError`] if `qc` does not reference `leaf`. pub fn new( - mut leaf: Leaf, - qc: QuorumCertificate, + mut leaf: Leaf2, + qc: QuorumCertificate2, ) -> Result> { // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will // require updates in callers and relevant types as well. - let leaf_commit = as Committable>::commit(&leaf); + let leaf_commit = as Committable>::commit(&leaf); ensure!( qc.data.leaf_commit == leaf_commit, InconsistentLeafSnafu { @@ -239,16 +239,16 @@ impl LeafQueryData { instance_state: &Types::InstanceState, ) -> Self { Self { - leaf: Leaf::genesis(validated_state, instance_state).await, - qc: QuorumCertificate::genesis::(validated_state, instance_state).await, + leaf: Leaf2::genesis::(validated_state, instance_state).await, + qc: QuorumCertificate2::genesis::(validated_state, instance_state).await, } } - pub fn leaf(&self) -> &Leaf { + pub fn leaf(&self) -> &Leaf2 { &self.leaf } - pub fn qc(&self) -> &QuorumCertificate { + pub fn qc(&self) -> &QuorumCertificate2 { &self.qc } @@ -260,7 +260,7 @@ impl LeafQueryData { // TODO: Replace with the new `commit` function in HotShot. Add an `upgrade_lock` parameter // and a `HsVer: Versions` bound, then call `leaf.commit(upgrade_lock).await`. This will // require updates in callers and relevant types as well. 
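// Until that HotShot change lands, the direct `Committable::commit` call
// below is how every call site in this crate hashes a `Leaf2`.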
- as Committable>::commit(&self.leaf) + as Committable>::commit(&self.leaf) } pub fn block_hash(&self) -> BlockHash { diff --git a/hotshot-query-service/src/data_source.rs b/hotshot-query-service/src/data_source.rs index 412b6dc96c..56a7baf0f9 100644 --- a/hotshot-query-service/src/data_source.rs +++ b/hotshot-query-service/src/data_source.rs @@ -133,7 +133,7 @@ pub mod availability_tests { }; use committable::Committable; use futures::stream::StreamExt; - use hotshot_types::data::Leaf; + use hotshot_types::data::Leaf2; use std::collections::HashMap; use std::fmt::Debug; use std::ops::{Bound, RangeBounds}; @@ -148,7 +148,7 @@ pub mod availability_tests { assert_eq!(leaf.height(), i as u64); assert_eq!( leaf.hash(), - as Committable>::commit(&leaf.leaf) + as Committable>::commit(&leaf.leaf) ); // Check indices. @@ -550,11 +550,11 @@ pub mod persistence_tests { setup_test, }, types::HeightIndexed, - Leaf, + Leaf2, }; use committable::Committable; use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::simple_certificate::QuorumCertificate2; #[tokio::test(flavor = "multi_thread")] pub async fn test_revert() @@ -571,12 +571,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. - let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -584,7 +584,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -623,12 +623,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. - let mut qc = QuorumCertificate::::genesis::( + let mut qc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) .await; - let mut leaf = Leaf::::genesis( + let mut leaf = Leaf2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) @@ -636,7 +636,7 @@ pub mod persistence_tests { // Increment the block number, to distinguish this block from the genesis block, which // already exists. leaf.block_header_mut().block_number += 1; - qc.data.leaf_commit = as Committable>::commit(&leaf); + qc.data.leaf_commit = as Committable>::commit(&leaf); let block = BlockQueryData::new(leaf.block_header().clone(), MockPayload::genesis()); let leaf = LeafQueryData::new(leaf, qc).unwrap(); @@ -686,12 +686,12 @@ pub mod persistence_tests { let ds = D::connect(&storage).await; // Mock up some consensus data. 
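// (Same invariant as in the availability tests: after bumping the block
// number, the mock QC's `leaf_commit` must be recomputed from the mutated
// leaf, or `LeafQueryData::new` will reject the pair.)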
-            let mut mock_qc = QuorumCertificate::<MockTypes>::genesis::<MockVersions>(
+            let mut mock_qc = QuorumCertificate2::<MockTypes>::genesis::<MockVersions>(
                 &TestValidatedState::default(),
                 &TestInstanceState::default(),
             )
             .await;
-            let mut mock_leaf = Leaf::<MockTypes>::genesis(
+            let mut mock_leaf = Leaf2::<MockTypes>::genesis::<MockVersions>(
                 &TestValidatedState::default(),
                 &TestInstanceState::default(),
             )
             .await;
@@ -699,7 +699,7 @@
         // Increment the block number, to distinguish this block from the genesis block, which
         // already exists.
         mock_leaf.block_header_mut().block_number += 1;
-        mock_qc.data.leaf_commit = <Leaf<MockTypes> as Committable>::commit(&mock_leaf);
+        mock_qc.data.leaf_commit = <Leaf2<MockTypes> as Committable>::commit(&mock_leaf);
 
         let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis());
         let leaf = LeafQueryData::new(mock_leaf.clone(), mock_qc.clone()).unwrap();
@@ -725,7 +725,7 @@
 
         // Get a mutable transaction again, insert different data.
         mock_leaf.block_header_mut().block_number += 1;
-        mock_qc.data.leaf_commit = <Leaf<MockTypes> as Committable>::commit(&mock_leaf);
+        mock_qc.data.leaf_commit = <Leaf2<MockTypes> as Committable>::commit(&mock_leaf);
         let block = BlockQueryData::new(mock_leaf.block_header().clone(), MockPayload::genesis());
         let leaf = LeafQueryData::new(mock_leaf, mock_qc).unwrap();
diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs
index 01149a62d1..96ad43a728 100644
--- a/hotshot-query-service/src/data_source/storage/sql.rs
+++ b/hotshot-query-service/src/data_source/storage/sql.rs
@@ -11,7 +11,6 @@
 // see <https://www.gnu.org/licenses/>.
 
 #![cfg(feature = "sql-data-source")]
-
 use crate::{
     data_source::{
         storage::pruning::{PruneStorage, PrunerCfg, PrunerConfig},
@@ -22,15 +21,22 @@ use crate::{
     status::HasMetrics,
     QueryError, QueryResult,
 };
+use anyhow::Context;
 use async_trait::async_trait;
 use chrono::Utc;
+use committable::Committable;
+use hotshot_types::{
+    data::{Leaf, Leaf2},
+    simple_certificate::{QuorumCertificate, QuorumCertificate2},
+    traits::{metrics::Metrics, node_implementation::NodeType},
+};
 
-use hotshot_types::traits::metrics::Metrics;
 use itertools::Itertools;
 use log::LevelFilter;
 
 #[cfg(not(feature = "embedded-db"))]
 use futures::future::FutureExt;
+use serde_json::Value;
 #[cfg(not(feature = "embedded-db"))]
 use sqlx::postgres::{PgConnectOptions, PgSslMode};
 #[cfg(feature = "embedded-db")]
@@ -796,6 +802,114 @@ impl VersionedDataSource for SqlStorage {
     }
 }
 
+struct Leaf2Row {
+    height: i64,
+    hash: String,
+    block_hash: String,
+    leaf2: Value,
+    qc2: Value,
+}
+impl SqlStorage {
+    pub async fn migrate_types<Types: NodeType>(&self) -> anyhow::Result<()> {
+        let mut offset = 0;
+        let limit = 10000;
+
+        loop {
+            let mut tx = self.read().await.map_err(|err| QueryError::Error {
+                message: err.to_string(),
+            })?;
+
+            let (is_migration_completed,) =
+                query_as::<(bool,)>("SELECT completed from leaf_migration LIMIT 1 ")
+                    .fetch_one(tx.as_mut())
+                    .await?;
+
+            if is_migration_completed {
+                tracing::info!("leaf1 to leaf2 migration already completed");
+                return Ok(());
+            }
+
+            let rows = QueryBuilder::default()
+                .query(&format!(
+                    "SELECT leaf, qc FROM leaf ORDER BY height LIMIT {} OFFSET {}",
+                    limit, offset
+                ))
+                .fetch_all(tx.as_mut())
+                .await?;
+
+            drop(tx);
+
+            if rows.is_empty() {
+                tracing::info!("no leaf1 rows found");
+                return Ok(());
+            }
+
+            let mut leaf_rows = Vec::new();
+
+            for row in rows.iter() {
+                let leaf1 = row.try_get("leaf")?;
+                let qc = row.try_get("qc")?;
+                let leaf1: Leaf<Types> = serde_json::from_value(leaf1)?;
+                let qc: QuorumCertificate<Types> = serde_json::from_value(qc)?;
+
+                let leaf2: Leaf2<Types> = leaf1.into();
+                let qc2: QuorumCertificate2<Types> = qc.to_qc2();
+
+                let commit = leaf2.commit();
+
+                let leaf2_json =
+                    serde_json::to_value(leaf2.clone()).context("failed to serialize leaf2")?;
+                let qc2_json = serde_json::to_value(qc2).context("failed to serialize QC2")?;
+
+                leaf_rows.push(Leaf2Row {
+                    height: leaf2.height() as i64,
+                    hash: commit.to_string(),
+                    block_hash: leaf2.block_header().commit().to_string(),
+                    leaf2: leaf2_json,
+                    qc2: qc2_json,
+                })
+            }
+
+            let mut query_builder: sqlx::QueryBuilder<Db> =
+                sqlx::QueryBuilder::new("INSERT INTO leaf2 (height, hash, block_hash, leaf, qc) ");
+
+            query_builder.push_values(leaf_rows.into_iter(), |mut b, row| {
+                b.push_bind(row.height)
+                    .push_bind(row.hash)
+                    .push_bind(row.block_hash)
+                    .push_bind(row.leaf2)
+                    .push_bind(row.qc2);
+            });
+
+            let query = query_builder.build();
+
+            let mut tx = self.write().await.map_err(|err| QueryError::Error {
+                message: err.to_string(),
+            })?;
+
+            query.execute(tx.as_mut()).await?;
+
+            tx.commit().await?;
+
+            if rows.len() < limit {
+                break;
+            }
+
+            offset += limit;
+        }
+
+        let mut tx = self.write().await.map_err(|err| QueryError::Error {
+            message: err.to_string(),
+        })?;
+
+        tx.upsert("leaf_migration", ["completed"], ["id"], [(true,)])
+            .await?;
+
+        tx.commit().await?;
+        Ok(())
+    }
+}
+
 // These tests run the `postgres` Docker image, which doesn't work on Windows.
 #[cfg(all(any(test, feature = "testing"), not(target_os = "windows")))]
 pub mod testing {
@@ -813,8 +927,8 @@ pub mod testing {
     use portpicker::pick_unused_port;
 
     use super::Config;
+    use crate::availability::query_data::QueryableHeader;
     use crate::testing::sleep;
-
     #[derive(Debug)]
     pub struct TmpDb {
         #[cfg(not(feature = "embedded-db"))]
@@ -1102,10 +1216,21 @@ pub mod testing {
 
 // These tests run the `postgres` Docker image, which doesn't work on Windows.
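// A note on `migrate_types` above: the `leaf_migration.completed` flag is
// re-checked at the top of every pass, so a migration that already finished
// is skipped on restart; rows are converted in LIMIT/OFFSET batches of
// 10,000, each bulk-inserted into `leaf2` with a single `push_values`
// INSERT. A second invocation is therefore a cheap no-op, e.g.:
//
//     storage.migrate_types::<MockTypes>().await?; // returns early after the flag check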
#[cfg(all(test, not(target_os = "windows")))] mod test { + use committable::{Commitment, CommitmentBoundsArkless, Committable}; + use hotshot::traits::BlockPayload; use hotshot_example_types::{ node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; + use hotshot_types::traits::EncodeBytes; + use hotshot_types::{ + data::{QuorumProposal, ViewNumber}, + simple_vote::QuorumData, + traits::{ + block_contents::{vid_commitment, BlockHeader}, + node_implementation::ConsensusTime, + }, + }; use jf_merkle_tree::{ prelude::UniversalMerkleTree, MerkleTreeScheme, ToTraversalPath, UniversalMerkleTreeScheme, }; @@ -1114,11 +1239,11 @@ mod test { use super::{testing::TmpDb, *}; use crate::{ - availability::LeafQueryData, + availability::{LeafQueryData, QueryableHeader}, data_source::storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, merklized_state::{MerklizedState, UpdateStateData}, testing::{ - mocks::{MockMerkleTree, MockTypes}, + mocks::{MockHeader, MockMerkleTree, MockPayload, MockTypes}, setup_test, }, }; @@ -1495,4 +1620,123 @@ mod test { ); } } + + #[tokio::test(flavor = "multi_thread")] + async fn test_leaf_migration() { + setup_test(); + + let num_leaves = 200; + let db = TmpDb::init().await; + + let storage = SqlStorage::connect(db.config()).await.unwrap(); + + for i in 0..num_leaves { + let view = ViewNumber::new(i); + let validated_state = TestValidatedState::default(); + let instance_state = TestInstanceState::default(); + + let (payload, metadata) = >::from_transactions( + [], + &validated_state, + &instance_state, + ) + .await + .unwrap(); + let builder_commitment = + >::builder_commitment(&payload, &metadata); + let payload_bytes = payload.encode(); + + let payload_commitment = vid_commitment(&payload_bytes, 4); + + let mut block_header = >::genesis( + &instance_state, + payload_commitment, + builder_commitment, + metadata, + ); + + block_header.block_number = i; + + let null_quorum_data = QuorumData { + leaf_commit: Commitment::>::default_commitment_no_preimage(), + }; + + let mut qc = QuorumCertificate::new( + null_quorum_data.clone(), + null_quorum_data.commit(), + view, + None, + std::marker::PhantomData, + ); + + let quorum_proposal = QuorumProposal { + block_header, + view_number: view, + justify_qc: qc.clone(), + upgrade_certificate: None, + proposal_certificate: None, + }; + + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + leaf.fill_block_payload(payload, 4).unwrap(); + qc.data.leaf_commit = as Committable>::commit(&leaf); + + let height = leaf.height() as i64; + let hash = as Committable>::commit(&leaf).to_string(); + let header = leaf.block_header(); + + let header_json = serde_json::to_value(header) + .context("failed to serialize header") + .unwrap(); + + let payload_commitment = + >::payload_commitment(header); + let mut tx = storage.write().await.unwrap(); + + tx.upsert( + "header", + ["height", "hash", "payload_hash", "data", "timestamp"], + ["height"], + [( + height, + leaf.block_header().commit().to_string(), + payload_commitment.to_string(), + header_json, + leaf.block_header().timestamp() as i64, + )], + ) + .await + .unwrap(); + + let leaf_json = serde_json::to_value(leaf.clone()).expect("failed to serialize leaf"); + let qc_json = serde_json::to_value(qc).expect("failed to serialize QC"); + tx.upsert( + "leaf", + ["height", "hash", "block_hash", "leaf", "qc"], + ["height"], + [( + height, + hash, + header.commit().to_string(), + leaf_json, + qc_json, + )], + ) + .await + .unwrap(); + 
tx.commit().await.unwrap(); + } + + storage + .migrate_types::() + .await + .expect("failed to migrate"); + let mut tx = storage.read().await.unwrap(); + let (count,) = query_as::<(i64,)>("SELECT COUNT(*) from leaf2") + .fetch_one(tx.as_mut()) + .await + .unwrap(); + + assert_eq!(count as u64, num_leaves, "not all leaves migrated"); + } } diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs index b8c227b752..696aca5ba3 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs @@ -14,22 +14,21 @@ //! Immutable query functionality of a SQL database. use super::{Database, Db, Query, QueryAs, Transaction}; +use crate::Leaf2; use crate::{ availability::{ BlockId, BlockQueryData, LeafQueryData, PayloadQueryData, QueryablePayload, VidCommonQueryData, }, data_source::storage::{PayloadMetadata, VidCommonMetadata}, - Header, Leaf, Payload, QueryError, QueryResult, + Header, Payload, QueryError, QueryResult, }; use anyhow::Context; use derivative::Derivative; -use hotshot_types::{ - simple_certificate::QuorumCertificate, - traits::{ - block_contents::{BlockHeader, BlockPayload}, - node_implementation::NodeType, - }, +use hotshot_types::simple_certificate::QuorumCertificate2; +use hotshot_types::traits::{ + block_contents::{BlockHeader, BlockPayload}, + node_implementation::NodeType, }; use sqlx::{Arguments, FromRow, Row}; use std::{ @@ -171,10 +170,10 @@ where { fn from_row(row: &'r ::Row) -> sqlx::Result { let leaf = row.try_get("leaf")?; - let leaf: Leaf = serde_json::from_value(leaf).decode_error("malformed leaf")?; + let leaf: Leaf2 = serde_json::from_value(leaf).decode_error("malformed leaf")?; let qc = row.try_get("qc")?; - let qc: QuorumCertificate = + let qc: QuorumCertificate2 = serde_json::from_value(qc).decode_error("malformed QC")?; Ok(Self { leaf, qc }) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs index 759296fb87..7ae9ff94f3 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/availability.rs @@ -50,7 +50,7 @@ where }; let row = query .query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE {where_clause}" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE {where_clause}" )) .fetch_one(self.as_mut()) .await?; @@ -174,7 +174,7 @@ where { let mut query = QueryBuilder::default(); let where_clause = query.bounds_to_where_clause(range, "height")?; - let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf {where_clause} ORDER BY height"); + let sql = format!("SELECT {LEAF_COLUMNS} FROM leaf2 {where_clause} ORDER BY height"); Ok(query .query(&sql) .fetch(self.as_mut()) @@ -367,7 +367,7 @@ where async fn first_available_leaf(&mut self, from: u64) -> QueryResult> { let row = query(&format!( - "SELECT {LEAF_COLUMNS} FROM leaf WHERE height >= $1 ORDER BY height LIMIT 1" + "SELECT {LEAF_COLUMNS} FROM leaf2 WHERE height >= $1 ORDER BY height LIMIT 1" )) .bind(from as i64) .fetch_one(self.as_mut()) diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index ca8874c179..89716ce9d7 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -155,7 +155,7 
@@ where // need to select the total number of VID rows and the number of present VID rows with a // NULL share. let sql = "SELECT l.max_height, l.total_leaves, p.null_payloads, v.total_vid, vn.null_vid, pruned_height FROM - (SELECT max(leaf.height) AS max_height, count(*) AS total_leaves FROM leaf) AS l, + (SELECT max(leaf.height) AS max_height, count(*) AS total_leaves FROM leaf2) AS l, (SELECT count(*) AS null_payloads FROM payload WHERE data IS NULL) AS p, (SELECT count(*) AS total_vid FROM vid) AS v, (SELECT count(*) AS null_vid FROM vid WHERE share IS NULL) AS vn, diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index 5eb3c731b8..5a50113744 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -504,7 +504,7 @@ where let leaf_json = serde_json::to_value(leaf.leaf()).context("failed to serialize leaf")?; let qc_json = serde_json::to_value(leaf.qc()).context("failed to serialize QC")?; self.upsert( - "leaf", + "leaf2", ["height", "hash", "block_hash", "leaf", "qc"], ["height"], [( diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 9ca48f0a99..e44c4b11b3 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -24,7 +24,7 @@ use futures::future::Future; use hotshot::types::{Event, EventType}; use hotshot_types::event::LeafInfo; use hotshot_types::{ - data::{Leaf, Leaf2, QuorumProposal}, + data::Leaf2, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, @@ -34,26 +34,6 @@ use hotshot_types::{ use jf_vid::VidScheme; use std::iter::once; -fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO do we still need some check here? - // `drb_seed` no longer exists on `Leaf2` - // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - /// An extension trait for types which implement the update trait for each API module. 
/// /// If a type implements [UpdateAvailabilityData] and @@ -113,25 +93,24 @@ where }, ) in qcs.zip(leaf_chain.iter().rev()) { - let leaf = downgrade_leaf(leaf2.clone()); - let qc = qc2.to_qc(); - let height = leaf.block_header().block_number(); - let leaf_data = match LeafQueryData::new(leaf.clone(), qc.clone()) { + let height = leaf2.block_header().block_number(); + + let leaf_data = match LeafQueryData::new(leaf2.clone(), qc2.clone()) { Ok(leaf) => leaf, Err(err) => { tracing::error!( height, - ?leaf, + ?leaf2, ?qc, "inconsistent leaf; cannot append leaf information: {err:#}" ); - return Err(leaf.block_header().block_number()); + return Err(leaf2.block_header().block_number()); } }; - let block_data = leaf + let block_data = leaf2 .block_payload() - .map(|payload| BlockQueryData::new(leaf.block_header().clone(), payload)); + .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload)); if block_data.is_none() { tracing::info!(height, "block not available at decide"); } @@ -139,17 +118,17 @@ where let (vid_common, vid_share) = if let Some(vid_share) = vid_share { ( Some(VidCommonQueryData::new( - leaf.block_header().clone(), + leaf2.block_header().clone(), vid_share.common.clone(), )), Some(vid_share.share.clone()), ) - } else if leaf.view_number().u64() == 0 { + } else if leaf2.view_number().u64() == 0 { // HotShot does not run VID in consensus for the genesis block. In this case, // the block payload is guaranteed to always be empty, so VID isn't really // necessary. But for consistency, we will still store the VID dispersal data, // computing it ourselves based on the well-known genesis VID commitment. - match genesis_vid(&leaf) { + match genesis_vid(leaf2) { Ok((common, share)) => (Some(common), Some(share)), Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); @@ -168,7 +147,7 @@ where .await { tracing::error!(height, "failed to append leaf information: {err:#}"); - return Err(leaf.block_header().block_number()); + return Err(leaf2.block_header().block_number()); } } } @@ -177,7 +156,7 @@ where } fn genesis_vid( - leaf: &Leaf, + leaf: &Leaf2, ) -> anyhow::Result<(VidCommonQueryData, VidShare)> { let payload = Payload::::empty().0; let bytes = payload.encode(); diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index b9d9c01c44..b215d8cad8 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -447,7 +447,7 @@ use tide_disco::{method::ReadState, App, StatusCode}; use vbs::version::StaticVersionType; pub use hotshot_types::{ - data::Leaf, + data::Leaf2, simple_certificate::QuorumCertificate, vid::{VidCommitment, VidCommon, VidShare}, }; @@ -597,7 +597,7 @@ mod test { use async_trait::async_trait; use atomic_store::{load_store::BincodeLoadStore, AtomicStore, AtomicStoreLoader, RollingLog}; use futures::future::FutureExt; - use hotshot_types::simple_certificate::QuorumCertificate; + use hotshot_types::simple_certificate::QuorumCertificate2; use portpicker::pick_unused_port; use std::ops::{Bound, RangeBounds}; use std::time::Duration; @@ -825,9 +825,11 @@ mod test { .unwrap(); // Mock up some data and add a block to the store. 
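// (With the crate-level re-export switched from `Leaf` to `Leaf2`, this test
// uses the same genesis pattern as the availability module: a `Leaf2` paired
// with the `QuorumCertificate2` that commits to it.)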
- let leaf = Leaf::::genesis(&Default::default(), &Default::default()).await; + let leaf = + Leaf2::::genesis::(&Default::default(), &Default::default()) + .await; let qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) + QuorumCertificate2::genesis::(&Default::default(), &Default::default()) .await; let leaf = LeafQueryData::new(leaf, qc).unwrap(); let block = BlockQueryData::new(leaf.header().clone(), MockPayload::genesis()); diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index eb05938302..c60d089e62 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -13,12 +13,12 @@ use crate::service::{ server_message::ServerMessage, }; use async_lock::RwLock; -use espresso_types::{downgrade_leaf, PubKey, SeqTypes}; +use espresso_types::{PubKey, SeqTypes}; use futures::{ channel::mpsc::{self, Receiver, SendError, Sender}, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_types::event::{Event, EventType}; use serde::{Deserialize, Serialize}; use tokio::{spawn, task::JoinHandle}; @@ -88,7 +88,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Send + Unpin + 'static, - K2: Sink, Error = SendError> + Send + Unpin + 'static, + K2: Sink, Error = SendError> + Send + Unpin + 'static, { let task_handle = spawn(Self::process_messages( event_stream, @@ -107,7 +107,7 @@ impl HotShotEventProcessingTask { where S: Stream> + Send + Unpin + 'static, K1: Sink + Unpin, - K2: Sink, Error = SendError> + Unpin, + K2: Sink, Error = SendError> + Unpin, { let mut event_stream = event_receiver; let mut url_sender = url_sender; @@ -128,9 +128,8 @@ impl HotShotEventProcessingTask { EventType::Decide { leaf_chain, .. 
} => { for leaf_info in leaf_chain.iter().rev() { let leaf2 = leaf_info.leaf.clone(); - let leaf = downgrade_leaf(leaf2); - let send_result = leaf_sender.send(leaf).await; + let send_result = leaf_sender.send(leaf2).await; if let Err(err) = send_result { tracing::error!("leaf sender closed: {}", err); panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); @@ -280,7 +279,7 @@ impl Drop for ProcessExternalMessageHandlingTask { pub async fn create_node_validator_processing( config: NodeValidatorConfig, internal_client_message_receiver: Receiver>>, - leaf_receiver: Receiver>, + leaf_receiver: Receiver>, ) -> Result>, CreateNodeValidatorProcessingError> { let client_thread_state = ClientThreadState::>::new( Default::default(), diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index 90d1867474..b364c105ee 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -11,7 +11,7 @@ use futures::{ channel::mpsc::{self, Sender}, FutureExt, Sink, SinkExt, Stream, StreamExt, }; -use hotshot_query_service::Leaf; +use hotshot_query_service::Leaf2; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::light_client::{CircuitField, StateVerKey}; use hotshot_types::signature_key::BLSPubKey; @@ -461,11 +461,11 @@ impl HotshotQueryServiceLeafStreamRetriever { } impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { - type Item = Leaf; + type Item = Leaf2; type ItemError = hotshot_query_service::Error; type Error = hotshot_query_service::Error; type Stream = surf_disco::socket::Connection< - Leaf, + Leaf2, surf_disco::socket::Unsupported, Self::ItemError, Version01, @@ -496,7 +496,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { "availability/stream/leaves/{}", start_block_height )) - .subscribe::() + .subscribe::() .await; let leaves_stream = match leaves_stream_result { @@ -540,8 +540,8 @@ impl ProcessProduceLeafStreamTask { /// returned state. pub fn new(leaf_stream_retriever: R, leaf_sender: K) -> Self where - R: LeafStreamRetriever> + Send + Sync + 'static, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever> + Send + Sync + 'static, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // let future = Self::process_consume_leaf_stream(leaf_stream_retriever, leaf_sender); let task_handle = spawn(Self::connect_and_process_leaves( @@ -556,8 +556,8 @@ impl ProcessProduceLeafStreamTask { async fn connect_and_process_leaves(leaf_stream_retriever: R, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { // We want to try and ensure that we are connected to the HotShot Query // Service, and are consuming leaves. @@ -596,7 +596,7 @@ impl ProcessProduceLeafStreamTask { leaf_stream_receiver: &R, ) -> Result where - R: LeafStreamRetriever>, + R: LeafStreamRetriever>, { let backoff_params = BackoffParams::default(); let mut delay = Duration::ZERO; @@ -639,8 +639,8 @@ impl ProcessProduceLeafStreamTask { /// will return. 
async fn process_consume_leaf_stream(leaves_stream: R::Stream, leaf_sender: K) where - R: LeafStreamRetriever>, - K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, + R: LeafStreamRetriever>, + K: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { let mut leaf_sender = leaf_sender; let mut leaves_stream = leaves_stream; diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index ac4c0fea46..f9324ab90d 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1196,11 +1196,12 @@ pub mod tests { }; use async_lock::RwLock; use bitvec::vec::BitVec; - use espresso_types::{Leaf, NodeState, ValidatedState}; + use espresso_types::{Leaf2, NodeState, ValidatedState}; use futures::{ channel::mpsc::{self, Sender}, SinkExt, StreamExt, }; + use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::{ @@ -1374,7 +1375,8 @@ pub mod tests { let (_, _, _, mut data_state) = create_test_data_state(); let client_thread_state = Arc::new(RwLock::new(create_test_client_thread_state())); - let leaf_1 = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf_1 = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let block_1 = create_block_detail_from_leaf(&leaf_1); data_state.add_latest_block(clone_block_detail(&block_1)); @@ -1614,7 +1616,8 @@ pub mod tests { // No response expected from the client messages at the moment. // send a new leaf - let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let expected_block = create_block_detail_from_leaf(&leaf); let arc_expected_block = Arc::new(expected_block); diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index 96599607a6..c88807b230 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -9,7 +9,7 @@ use futures::{channel::mpsc::SendError, Sink, SinkExt, Stream, StreamExt}; use hotshot_query_service::{ availability::{QueryableHeader, QueryablePayload}, explorer::{BlockDetail, ExplorerHeader, Timestamp}, - Leaf, Resolvable, + Leaf2, Resolvable, }; use hotshot_stake_table::vec_based::StakeTable; use hotshot_types::{ @@ -151,7 +151,7 @@ impl DataState { /// [create_block_detail_from_leaf] is a helper function that will build a /// [BlockDetail] from the reference to [Leaf]. -pub fn create_block_detail_from_leaf(leaf: &Leaf) -> BlockDetail { +pub fn create_block_detail_from_leaf(leaf: &Leaf2) -> BlockDetail { let block_header = leaf.block_header(); let block_payload = &leaf.block_payload().unwrap_or(Payload::empty().0); @@ -223,7 +223,7 @@ impl std::error::Error for ProcessLeafError { /// computed into a [BlockDetail] and sent to the [Sink] so that it can be /// processed for real-time considerations. 
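/// (After this migration the incoming stream carries `Leaf2`; the
/// `BlockDetail` construction via `create_block_detail_from_leaf` is
/// otherwise unchanged.)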
async fn process_incoming_leaf( - leaf: Leaf, + leaf: Leaf2, data_state: Arc>, mut block_sender: BDSink, mut voters_sender: BVSink, @@ -339,7 +339,7 @@ impl ProcessLeafStreamTask { voters_sender: K2, ) -> Self where - S: Stream> + Send + Sync + Unpin + 'static, + S: Stream> + Send + Sync + Unpin + 'static, K1: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, K2: Sink, Error = SendError> + Clone + Send + Sync + Unpin + 'static, { @@ -363,7 +363,7 @@ impl ProcessLeafStreamTask { block_sender: BDSink, voters_senders: BVSink, ) where - S: Stream> + Unpin, + S: Stream> + Unpin, Header: BlockHeader + QueryableHeader + ExplorerHeader, Payload: BlockPayload, BDSink: Sink, Error = SendError> + Clone + Unpin, @@ -569,9 +569,10 @@ mod tests { }; use async_lock::RwLock; use espresso_types::{ - v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf, NodeState, ValidatedState, + v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; + use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::time::timeout; @@ -627,7 +628,7 @@ mod tests { }; let instance_state = NodeState::mock(); - let sample_leaf = Leaf::genesis(&validated_state, &instance_state).await; + let sample_leaf = Leaf2::genesis::(&validated_state, &instance_state).await; let mut leaf_sender = leaf_sender; // We should be able to send a leaf without issue diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 9e0726f80e..3602480e22 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1101,12 +1101,15 @@ mod api_tests { }; use ethers::utils::Anvil; use futures::{future, stream::StreamExt}; + use hotshot_example_types::node_types::TestVersions; use hotshot_query_service::availability::{ AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; + use hotshot_types::data::{DaProposal2, EpochNumber, VidDisperseShare2}; + use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ - data::{DaProposal, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare}, + data::{QuorumProposal2, QuorumProposalWrapper}, event::LeafInfo, message::Proposal, simple_certificate::QuorumCertificate, @@ -1286,7 +1289,7 @@ mod api_tests { // Create two non-consecutive leaf chains. 
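// (On the new `*2` data types used below, `epoch` and `target_epoch` are
// `Option<EpochNumber>`, which is why the fixtures wrap them in `Some`.)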
let mut chain1 = vec![]; - let genesis = Leaf2::genesis(&Default::default(), &NodeState::mock()).await; + let genesis = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let payload = genesis.block_payload().unwrap(); let payload_bytes_arc = payload.encode(); let disperse = vid_scheme(2).disperse(payload_bytes_arc.clone()).unwrap(); @@ -1345,8 +1348,8 @@ mod api_tests { share: disperse.shares[0].clone(), common: disperse.common.clone(), recipient_key: pubkey, - epoch: EpochNumber::new(0), - target_epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: None, }; persistence @@ -1361,7 +1364,7 @@ mod api_tests { encoded_transactions: payload_bytes_arc.clone(), metadata: payload.ns_table().clone(), view_number: leaf.view_number(), - epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), }; let da_proposal = Proposal { data: da_proposal_inner, diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index 657492768d..d5dcb5039e 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -450,7 +450,7 @@ where .fetch_one(tx.as_mut()) .await?; let proposal: Proposal> = bincode::deserialize(&data)?; - Ok(Leaf2::from_quorum_proposal(&proposal.data)) + Ok(Leaf2::from_quorum_proposal(&proposal.data.into())) } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 389bcff8de..04ffcc7065 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -56,7 +56,7 @@ mod persistence_tests { use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ data::{ - DaProposal, EpochNumber, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare, + DaProposal2, EpochNumber, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare2, ViewNumber, }, event::{EventType, HotShotAction, LeafInfo}, @@ -170,7 +170,8 @@ mod persistence_tests { None ); - let leaf: Leaf2 = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf: Leaf2 = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); let disperse = vid_scheme(2) @@ -184,8 +185,8 @@ mod persistence_tests { share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, - epoch: EpochNumber::new(0), - target_epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: None, }; let mut quorum_proposal = Proposal { @@ -256,7 +257,7 @@ mod persistence_tests { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), - epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), }; let da_proposal = Proposal { @@ -666,8 +667,8 @@ mod persistence_tests { share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, - epoch: EpochNumber::new(0), - target_epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), + target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: None, } .to_proposal(&privkey) @@ -703,7 +704,7 @@ mod persistence_tests { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), - epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: Default::default(), @@ 
-857,8 +858,8 @@ mod persistence_tests { share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, - epoch: EpochNumber::new(0), - target_epoch: EpochNumber::new(0), + epoch: None, + target_epoch: None, data_epoch_payload_commitment: None, } .to_proposal(&privkey) @@ -898,7 +899,7 @@ mod persistence_tests { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), - epoch: EpochNumber::new(0), + epoch: None, }, signature: block_payload_signature, _pd: Default::default(), diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 30836dcd10..b5d8cf344a 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -8,7 +8,10 @@ use espresso_types::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + VidDisperseShare, VidDisperseShare2, + }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, simple_certificate::{ @@ -1003,7 +1006,7 @@ impl SequencerPersistence for Persistence { async fn append_proposal2( &self, - proposal: &Proposal>, + proposal: &Proposal>, ) -> anyhow::Result<()> { self.append_quorum_proposal2(proposal).await } @@ -1371,15 +1374,15 @@ mod test { use super::*; use crate::persistence::testing::TestablePersistence; - use crate::{persistence::testing::TestablePersistence, BLSPubKey}; + use crate::BLSPubKey; use committable::Committable; use committable::{Commitment, CommitmentBoundsArkless}; - use espresso_types::{Header, Leaf, NodeState, ValidatedState}; + use espresso_types::{Header, Leaf, ValidatedState}; use hotshot_types::{ simple_certificate::QuorumCertificate, simple_vote::QuorumData, - traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, + traits::{block_contents::vid_commitment, EncodeBytes}, vid::vid_scheme, }; use jf_vid::VidScheme; @@ -1688,6 +1691,8 @@ mod test { "quorum proposals count does not match", ); } + + #[tokio::test(flavor = "multi_thread")] async fn test_load_quorum_proposals_invalid_extension() { setup_test(); @@ -1725,13 +1730,13 @@ mod test { // Store quorum proposals. let quorum_proposal1 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal1) + .append_quorum_proposal2(&quorum_proposal1) .await .unwrap(); quorum_proposal.data.proposal.view_number = ViewNumber::new(1); let quorum_proposal2 = quorum_proposal.clone(); storage - .append_quorum_proposal(&quorum_proposal2) + .append_quorum_proposal2(&quorum_proposal2) .await .unwrap(); @@ -1797,7 +1802,7 @@ mod test { // Store valid quorum proposal. 
storage - .append_quorum_proposal(&quorum_proposal) + .append_quorum_proposal2(&quorum_proposal) .await .unwrap(); diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index 4c82a6949a..c20857d2da 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -9,7 +9,7 @@ use espresso_types::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare}, + data::{DaProposal, DaProposal2, QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2}, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -192,8 +192,27 @@ impl SequencerPersistence for NoStorage { async fn append_proposal2( &self, - _proposal: &Proposal>, + _proposal: &Proposal>, ) -> anyhow::Result<()> { Ok(()) } + + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_da_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_vid_shares(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_undecided_state(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { + Ok(()) + } + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { + Ok(()) + } } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 1f7d40e5d2..e706599086 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -5,7 +5,7 @@ use committable::Committable; use derivative::Derivative; use derive_more::derive::{From, Into}; use espresso_types::{ - downgrade_commitment_map, downgrade_leaf, parse_duration, parse_size, upgrade_commitment_map, + parse_duration, parse_size, upgrade_commitment_map, v0::traits::{EventConsumer, PersistenceOptions, SequencerPersistence, StateCatchup}, BackoffParams, BlockMerkleTree, FeeMerkleTree, Leaf, Leaf2, NetworkConfig, Payload, }; @@ -30,9 +30,12 @@ use hotshot_query_service::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, + VidDisperseShare, VidDisperseShare2, + }, event::{Event, EventType, HotShotAction, LeafInfo}, - message::Proposal, + message::{convert_proposal, Proposal}, simple_certificate::{ NextEpochQuorumCertificate2, QuorumCertificate, QuorumCertificate2, UpgradeCertificate, }, @@ -1138,7 +1141,7 @@ impl SequencerPersistence for Persistence { let bytes: Vec = row.get("data"); let proposal: Proposal> = bincode::deserialize(&bytes)?; - Ok((view_number, proposal)) + Ok((view_number, convert_proposal(proposal))) }) .collect::>>()?, )) @@ -1156,7 +1159,7 @@ impl SequencerPersistence for Persistence { .await?; let proposal: Proposal> = bincode::deserialize(&data)?; - Ok(proposal) + Ok(convert_proposal(proposal)) } async fn append_vid( @@ -1231,7 +1234,7 @@ impl SequencerPersistence for Persistence { .await?; // We also keep track of any QC we see in case we need it to recover our archival storage. 
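// (`proposal.data` is now a `QuorumProposalWrapper`, which exposes the QC
// through the `justify_qc()` accessor rather than a public field, hence the
// change below.)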
- let justify_qc = &proposal.data.justify_qc; + let justify_qc = &proposal.data.justify_qc(); let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?; tx.upsert( "quorum_certificate2", @@ -1972,7 +1975,7 @@ async fn fetch_leaf_from_proposals( let qc: QuorumCertificate2 = bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?; - let leaf = Leaf2::from_quorum_proposal(&proposal.data); + let leaf = Leaf2::from_quorum_proposal(&proposal.data.into()); Ok(Some((leaf, qc))) } @@ -2133,7 +2136,8 @@ mod test { let storage = Persistence::connect(&tmp).await; // Mock up some data. - let leaf = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); let disperse = vid_scheme(2) @@ -2156,7 +2160,7 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: leaf.view_number(), - justify_qc: leaf.justify_qc().to_qc2(), + justify_qc: leaf.justify_qc(), upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -2193,7 +2197,7 @@ mod test { .proposal .justify_qc .data - .leaf_commit = Committable::commit(&leaf.clone().into()); + .leaf_commit = Committable::commit(&leaf.clone()); let qc = next_quorum_proposal.data.justify_qc(); // Add to database. @@ -2259,7 +2263,8 @@ mod test { let data_view = ViewNumber::new(1); // Populate some data. - let leaf = Leaf2::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); @@ -2274,8 +2279,8 @@ mod test { share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, - epoch: EpochNumber::new(0), - target_epoch: EpochNumber::new(0), + epoch: None, + target_epoch: None, data_epoch_payload_commitment: None, } .to_proposal(&privkey) @@ -2314,7 +2319,7 @@ mod test { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: data_view, - epoch: EpochNumber::new(0), + epoch: Some(EpochNumber::new(0)), }, signature: block_payload_signature, _pd: Default::default(), diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index e6b2de65e5..82be9ffa1e 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -706,24 +706,12 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { self.migrate_quorum_certificates().await } - async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { - Ok(()) - } - async fn migrate_da_proposals(&self) -> anyhow::Result<()> { - Ok(()) - } - async fn migrate_vid_shares(&self) -> anyhow::Result<()> { - Ok(()) - } - async fn migrate_undecided_state(&self) -> anyhow::Result<()> { - Ok(()) - } - async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { - Ok(()) - } - async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { - Ok(()) - } + async fn migrate_anchor_leaf(&self) -> anyhow::Result<()>; + async fn migrate_da_proposals(&self) -> anyhow::Result<()>; + async fn migrate_vid_shares(&self) -> anyhow::Result<()>; + async fn migrate_undecided_state(&self) -> anyhow::Result<()>; + async fn migrate_quorum_proposals(&self) -> anyhow::Result<()>; + async fn migrate_quorum_certificates(&self) -> anyhow::Result<()>; async fn 
load_anchor_view(&self) -> anyhow::Result { match self.load_anchor_leaf().await? { diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index fdac8c80e0..d2c32e598f 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -6,7 +6,7 @@ use derive_more::{From, Into}; use futures::future::BoxFuture; use hotshot_types::{ consensus::CommitmentMap, - data::{Leaf, Leaf2, QuorumProposal}, + data::{Leaf, Leaf2}, traits::node_implementation::NodeType, }; use rand::Rng; @@ -25,25 +25,6 @@ use time::{ }; use tokio::time::sleep; -pub fn downgrade_leaf(leaf2: Leaf2) -> Leaf { - // TODO verify removal. It doesn't seem we need this check, but lets double check. - // if leaf2.drb_seed != INITIAL_DRB_SEED_INPUT && leaf2.drb_result != INITIAL_DRB_RESULT { - // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); - // } - let quorum_proposal = QuorumProposal { - block_header: leaf2.block_header().clone(), - view_number: leaf2.view_number(), - justify_qc: leaf2.justify_qc().to_qc(), - upgrade_certificate: leaf2.upgrade_certificate(), - proposal_certificate: None, - }; - let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - if let Some(payload) = leaf2.block_payload() { - leaf.fill_block_payload_unchecked(payload); - } - leaf -} - pub fn upgrade_commitment_map( map: CommitmentMap>, ) -> CommitmentMap> { @@ -55,17 +36,6 @@ pub fn upgrade_commitment_map( .collect() } -pub fn downgrade_commitment_map( - map: CommitmentMap>, -) -> CommitmentMap> { - map.into_values() - .map(|leaf2| { - let leaf = downgrade_leaf(leaf2); - ( as Committable>::commit(&leaf), leaf) - }) - .collect() -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)] pub enum Update { #[default] From 9b80a9a83b4998eb1248e60296d8b74ccbf351fc Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 6 Feb 2025 20:48:47 +0500 Subject: [PATCH 031/120] fix quorum proposal migration and storage --- .../src/testing/basic.rs | 4 +- hotshot-builder-core/src/service.rs | 17 ++--- .../src/testing/basic_test.rs | 7 +- .../src/testing/finalization_test.rs | 7 +- hotshot-builder-core/src/testing/mod.rs | 15 ++-- .../data_source/storage/sql/queries/node.rs | 2 +- .../src/testing/consensus.rs | 15 ++-- sequencer/src/api.rs | 18 +++-- sequencer/src/persistence.rs | 8 +-- sequencer/src/persistence/fs.rs | 68 +++++++++---------- sequencer/src/persistence/sql.rs | 43 ++++++------ 11 files changed, 99 insertions(+), 105 deletions(-) diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index bfbedf8b0d..99418784b4 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -186,9 +186,7 @@ async fn test_pruning() { // everything else is boilerplate. 
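// (The mock below builds the genesis `QuorumCertificate2` directly instead
// of routing through `QuorumCertificate::genesis(..).to_qc2()`; the same
// conversion repeats across the builder crates in this patch.)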
let mock_qc = - QuorumCertificate::genesis::(&Default::default(), &Default::default()) - .await - .to_qc2(); + QuorumCertificate2::genesis::(&Default::default(), &Default::default()).await; let leaf = Leaf2::from_quorum_proposal(&QuorumProposalWrapper { proposal: QuorumProposal2 { block_header: >::genesis( diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index 2b40e40ae2..6659335aba 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1641,11 +1641,11 @@ mod test { use hotshot_types::data::EpochNumber; use hotshot_types::data::Leaf2; use hotshot_types::data::{QuorumProposal2, QuorumProposalWrapper}; + use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::Transaction; use hotshot_types::{ data::{Leaf, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{ block_contents::vid_commitment, node_implementation::ConsensusTime, signature_key::BuilderSignatureKey, @@ -4319,12 +4319,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, @@ -4395,12 +4394,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, @@ -4462,12 +4460,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs index 098a16f349..3a12d9a76c 100644 --- a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -3,7 +3,7 @@ pub use hotshot_types::{ data::{EpochNumber, Leaf, ViewNumber}, message::Proposal, signature_key::BLSPubKey, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, traits::{ block_contents::BlockPayload, node_implementation::{ConsensusTime, NodeType}, @@ -167,12 +167,11 @@ mod tests { let mut previous_commitment = initial_commitment; let mut previous_view = ViewNumber::new(0); let mut previous_quorum_proposal = { - let previous_jc = QuorumCertificate::::genesis::( + let previous_jc = QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(); + .await; QuorumProposalWrapper:: { proposal: QuorumProposal2:: { diff --git a/hotshot-builder-core/src/testing/finalization_test.rs b/hotshot-builder-core/src/testing/finalization_test.rs index 0e225b6ac6..0e2e4d97bf 100644 --- a/hotshot-builder-core/src/testing/finalization_test.rs +++ b/hotshot-builder-core/src/testing/finalization_test.rs @@ -21,10 +21,10 @@ use 
hotshot_example_types::{ node_types::{TestTypes, TestVersions}, state_types::{TestInstanceState, TestValidatedState}, }; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::{ data::{DaProposal2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::QuorumCertificate, traits::{ block_contents::{vid_commitment, BlockHeader}, node_implementation::ConsensusTime, @@ -321,12 +321,11 @@ async fn progress_round_with_transactions( proposal: QuorumProposal2:: { block_header, view_number: next_view, - justify_qc: QuorumCertificate::::genesis::( + justify_qc: QuorumCertificate2::::genesis::( &TestValidatedState::default(), &TestInstanceState::default(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_epoch_justify_qc: None, diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 60c0f4f513..1d77efb574 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -15,7 +15,7 @@ use hotshot::{ use hotshot_types::{ data::{DaProposal2, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{block_contents::vid_commitment, node_implementation::ConsensusTime}, utils::BuilderCommitment, @@ -174,12 +174,13 @@ pub async fn calc_proposal_msg( }; let justify_qc = match prev_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index 89716ce9d7..26e76abbb3 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -155,7 +155,7 @@ where // need to select the total number of VID rows and the number of present VID rows with a // NULL share. 
let sql = "SELECT l.max_height, l.total_leaves, p.null_payloads, v.total_vid, vn.null_vid, pruned_height FROM - (SELECT max(leaf.height) AS max_height, count(*) AS total_leaves FROM leaf2) AS l, + (SELECT max(leaf2.height) AS max_height, count(*) AS total_leaves FROM leaf2) AS l, (SELECT count(*) AS null_payloads FROM payload WHERE data IS NULL) AS p, (SELECT count(*) AS total_vid FROM vid) AS v, (SELECT count(*) AS null_vid FROM vid WHERE share IS NULL) AS vn, diff --git a/marketplace-builder-shared/src/testing/consensus.rs b/marketplace-builder-shared/src/testing/consensus.rs index 3a1405fff4..789bec3272 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -19,7 +19,7 @@ use hotshot_example_types::{ use hotshot_types::{ data::{DaProposal2, EpochNumber, Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, - simple_certificate::{QuorumCertificate, SimpleCertificate, SuccessThreshold}, + simple_certificate::{QuorumCertificate2, SimpleCertificate, SuccessThreshold}, simple_vote::QuorumData2, traits::{block_contents::vid_commitment, node_implementation::ConsensusTime}, }; @@ -93,12 +93,13 @@ impl SimulatedChainState { }; let justify_qc = match self.previous_quorum_proposal.as_ref() { - None => QuorumCertificate::::genesis::( - &TestValidatedState::default(), - &TestInstanceState::default(), - ) - .await - .to_qc2(), + None => { + QuorumCertificate2::::genesis::( + &TestValidatedState::default(), + &TestInstanceState::default(), + ) + .await + } Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 3602480e22..5165b9f5c7 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1097,7 +1097,7 @@ mod api_tests { use espresso_types::MockSequencerVersions; use espresso_types::{ traits::{EventConsumer, PersistenceOptions}, - Header, Leaf, Leaf2, NamespaceId, + Header, Leaf2, NamespaceId, }; use ethers::utils::Anvil; use futures::{future, stream::StreamExt}; @@ -1112,7 +1112,6 @@ mod api_tests { data::{QuorumProposal2, QuorumProposalWrapper}, event::LeafInfo, message::Proposal, - simple_certificate::QuorumCertificate, traits::{node_implementation::ConsensusTime, signature_key::SignatureKey, EncodeBytes}, vid::vid_scheme, }; @@ -1298,12 +1297,11 @@ mod api_tests { proposal: QuorumProposal2:: { block_header: genesis.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1488,13 +1486,13 @@ mod api_tests { )); let consumer = ApiEventConsumer::from(data_source.clone()); - let mut qc = QuorumCertificate::genesis::( + let mut qc = QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(); - let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + .await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; // Append the genesis leaf. We don't use this for the test, because the update function will // automatically fill in the missing data for genesis. 
We just append this to get into a @@ -1503,7 +1501,7 @@ mod api_tests { persistence .append_decided_leaves( leaf.view_number(), - [(&leaf_info(leaf.clone().into()), qc.clone())], + [(&leaf_info(leaf.clone()), qc.clone())], &consumer, ) .await diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 04ffcc7065..2a5b186be9 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -194,12 +194,11 @@ mod persistence_tests { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -257,7 +256,7 @@ mod persistence_tests { encoded_transactions: leaf_payload_bytes_arc.clone(), metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), - epoch: Some(EpochNumber::new(0)), + epoch: None, }; let da_proposal = Proposal { @@ -324,6 +323,7 @@ mod persistence_tests { ); let quorum_proposal1 = quorum_proposal.clone(); + storage .append_quorum_proposal2(&quorum_proposal1) .await diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index b5d8cf344a..4d1a872baa 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -9,8 +9,8 @@ use espresso_types::{ use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, - VidDisperseShare, VidDisperseShare2, + DaProposal, DaProposal2, QuorumProposal, QuorumProposalWrapper, VidDisperseShare, + VidDisperseShare2, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -835,27 +835,25 @@ impl SequencerPersistence for Persistence { let proposal_bytes = fs::read(file)?; // Then, deserialize. - let proposal: Proposal> = - match bincode::deserialize(&proposal_bytes) { - Ok(proposal) => proposal, - Err(err) => { - // At this point, if the file contents are invalid, it is most likely an - // error rather than a miscellaneous file somehow ending up in the - // directory. However, we continue on, because it is better to collect as - // many proposals as we can rather than letting one bad proposal cause the - // entire operation to fail, and it is still possible that this was just - // some unintended file whose name happened to match the naming convention. - tracing::warn!( - view_number, - "ignoring malformed quorum proposal file: {err:#}" - ); - continue; - } - }; - let proposal2 = convert_proposal(proposal); + let proposal = match bincode::deserialize(&proposal_bytes) { + Ok(proposal) => proposal, + Err(err) => { + // At this point, if the file contents are invalid, it is most likely an + // error rather than a miscellaneous file somehow ending up in the + // directory. However, we continue on, because it is better to collect as + // many proposals as we can rather than letting one bad proposal cause the + // entire operation to fail, and it is still possible that this was just + // some unintended file whose name happened to match the naming convention. + tracing::warn!( + view_number, + "ignoring malformed quorum proposal file: {err:#}" + ); + continue; + } + }; // Push to the map and we're done. 
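[Editor's note: the `map.insert` change below is the tail of the loader refactor above — the files are now written in the v2 format, so the loader deserializes the wrapper type directly and the interim upgrade step disappears. Roughly, with the generics written out in full:]

    // Before: read v1 bytes, then upgrade in memory.
    let proposal: Proposal<QuorumProposal<SeqTypes>> =
        bincode::deserialize(&proposal_bytes)?;
    let proposal2 = convert_proposal(proposal);

    // After: the stored bytes already carry the v2 wrapper.
    let proposal: Proposal<QuorumProposalWrapper<SeqTypes>> =
        bincode::deserialize(&proposal_bytes)?;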
- map.insert(ViewNumber::new(view_number), proposal2); + map.insert(ViewNumber::new(view_number), proposal); } Ok(map) @@ -869,10 +867,9 @@ impl SequencerPersistence for Persistence { let dir_path = inner.quorum_proposals2_dir_path(); let file_path = dir_path.join(view.to_string()).with_extension("txt"); let bytes = fs::read(file_path)?; - let proposal: Proposal> = bincode::deserialize(&bytes)?; - // TODO: rather than converting, we should store the value of QuorumProposalWrapper::with_epoch - let proposal_wrapper = convert_proposal(proposal); - Ok(proposal_wrapper) + let proposal = bincode::deserialize(&bytes)?; + + Ok(proposal) } async fn load_upgrade_certificate( @@ -1247,7 +1244,7 @@ impl SequencerPersistence for Persistence { let file_path = qp2_path.join(view.to_string()).with_extension("txt"); - let proposal2: Proposal> = + let proposal2: Proposal> = convert_proposal(proposal); inner.replace( @@ -1367,6 +1364,7 @@ mod test { use espresso_types::{NodeState, PubKey}; use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestVersions; + use hotshot_types::data::QuorumProposal2; use sequencer_utils::test_utils::setup_test; use serde_json::json; use std::marker::PhantomData; @@ -1710,12 +1708,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: ViewNumber::genesis(), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1743,8 +1740,8 @@ mod test { // Change one of the file extensions. It can happen that we end up with files with the wrong // extension if, for example, the node is killed before cleaning up a swap file. fs::rename( - tmp.path().join("quorum_proposals/1.txt"), - tmp.path().join("quorum_proposals/1.swp"), + tmp.path().join("quorum_proposals2/1.txt"), + tmp.path().join("quorum_proposals2/1.swp"), ) .unwrap(); @@ -1775,12 +1772,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: ViewNumber::new(1), - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &Default::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -1793,9 +1789,9 @@ mod test { }; // First store an invalid quorum proposal. 
- fs::create_dir_all(tmp.path().join("quorum_proposals")).unwrap(); + fs::create_dir_all(tmp.path().join("quorum_proposals2")).unwrap(); fs::write( - tmp.path().join("quorum_proposals/0.txt"), + tmp.path().join("quorum_proposals2/0.txt"), "invalid data".as_bytes(), ) .unwrap(); diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index e706599086..ec7b664547 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1139,9 +1139,8 @@ impl SequencerPersistence for Persistence { let view: i64 = row.get("view"); let view_number: ViewNumber = ViewNumber::new(view.try_into()?); let bytes: Vec = row.get("data"); - let proposal: Proposal> = - bincode::deserialize(&bytes)?; - Ok((view_number, convert_proposal(proposal))) + let proposal = bincode::deserialize(&bytes)?; + Ok((view_number, proposal)) }) .collect::>>()?, )) @@ -1157,9 +1156,9 @@ impl SequencerPersistence for Persistence { .bind(view.u64() as i64) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; + let proposal = bincode::deserialize(&data)?; - Ok(convert_proposal(proposal)) + Ok(proposal) } async fn append_vid( @@ -1222,6 +1221,7 @@ impl SequencerPersistence for Persistence { proposal: &Proposal>, ) -> anyhow::Result<()> { let view_number = proposal.data.view_number().u64(); + let proposal_bytes = bincode::serialize(&proposal).context("serializing proposal")?; let leaf_hash = Committable::commit(&Leaf2::from_quorum_proposal(&proposal.data)); let mut tx = self.db.write().await?; @@ -1616,10 +1616,12 @@ impl SequencerPersistence for Persistence { let leaf_hash: String = row.try_get("leaf_hash")?; let data: Vec = row.try_get("data")?; - let quorum_proposal: QuorumProposal = bincode::deserialize(&data)?; - let quorum_proposal2: QuorumProposal2 = quorum_proposal.into(); + let quorum_proposal: Proposal> = + bincode::deserialize(&data)?; + let quorum_proposal2: Proposal> = + convert_proposal(quorum_proposal); - let view = quorum_proposal2.view_number().u64() as i64; + let view = quorum_proposal2.data.view_number().u64() as i64; let data = bincode::serialize(&quorum_proposal2)?; values.push((view, leaf_hash, data)); @@ -2145,12 +2147,15 @@ mod test { .unwrap(); let payload_commitment = disperse.commit; let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = VidDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, share: disperse.shares[0].clone(), common: disperse.common, recipient_key: pubkey, + epoch: None, + target_epoch: None, + data_epoch_payload_commitment: None, } .to_proposal(&privkey) .unwrap() @@ -2180,10 +2185,11 @@ mod test { let block_payload_signature = BLSPubKey::sign(&privkey, &leaf_payload_bytes_arc) .expect("Failed to sign block payload"); let da_proposal = Proposal { - data: DaProposal:: { + data: DaProposal2:: { encoded_transactions: leaf_payload_bytes_arc, metadata: leaf_payload.ns_table().clone(), view_number: ViewNumber::new(0), + epoch: None, }, signature: block_payload_signature, _pd: Default::default(), @@ -2202,10 +2208,10 @@ mod test { // Add to database. 
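[Editor's note: the hunk below moves the SQL persistence test onto the epoch-aware v2 append APIs. Side by side, assuming the `storage`, `da_proposal`, `vid_share`, and `payload_commitment` bindings from the surrounding test:]

    // Before: v1 appends, taking DaProposal / VidDisperseShare payloads.
    storage.append_da(&da_proposal, payload_commitment).await.unwrap();
    storage.append_vid(&vid_share).await.unwrap();

    // After: v2 appends, taking the epoch-aware DaProposal2 / VidDisperseShare2.
    storage.append_da2(&da_proposal, payload_commitment).await.unwrap();
    storage.append_vid2(&vid_share).await.unwrap();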
storage - .append_da(&da_proposal, payload_commitment) + .append_da2(&da_proposal, payload_commitment) .await .unwrap(); - storage.append_vid(&vid_share).await.unwrap(); + storage.append_vid2(&vid_share).await.unwrap(); storage .append_quorum_proposal2(&quorum_proposal) .await @@ -2290,12 +2296,11 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: data_view, - justify_qc: QuorumCertificate::genesis::( + justify_qc: QuorumCertificate2::genesis::( &ValidatedState::default(), &NodeState::mock(), ) - .await - .to_qc2(), + .await, upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, @@ -2453,16 +2458,16 @@ mod test { BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum proposal"); - let proposal_bytes = bincode::serialize(&quorum_proposal) - .context("serializing proposal") - .unwrap(); - let proposal = Proposal { data: quorum_proposal.clone(), signature: quorum_proposal_signature, _pd: PhantomData, }; + let proposal_bytes = bincode::serialize(&proposal) + .context("serializing proposal") + .unwrap(); + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); leaf.fill_block_payload(payload, 4).unwrap(); From 4745aa6f2c1fa3002fd219cf11ed50c5fea4de3d Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 6 Feb 2025 20:58:15 +0500 Subject: [PATCH 032/120] fix fetching --- sequencer/src/persistence/sql.rs | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index ec7b664547..31268c4da2 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1846,7 +1846,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM vid_share WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM vid_share2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1860,7 +1860,7 @@ impl Provider for Persistence { } }; - let share: Proposal> = + let share: Proposal> = match bincode::deserialize(&bytes) { Ok(share) => share, Err(err) => { @@ -1886,7 +1886,7 @@ impl Provider for Persistence { }; let bytes = match query_as::<(Vec,)>( - "SELECT data FROM da_proposal WHERE payload_hash = $1 LIMIT 1", + "SELECT data FROM da_proposal2 WHERE payload_hash = $1 LIMIT 1", ) .bind(req.0.to_string()) .fetch_optional(tx.as_mut()) @@ -1900,7 +1900,7 @@ impl Provider for Persistence { } }; - let proposal: Proposal> = match bincode::deserialize(&bytes) + let proposal: Proposal> = match bincode::deserialize(&bytes) { Ok(proposal) => proposal, Err(err) => { @@ -2161,18 +2161,16 @@ mod test { .unwrap() .clone(); - let quorum_proposal = QuorumProposalWrapper:: { - proposal: QuorumProposal2:: { - block_header: leaf.block_header().clone(), - view_number: leaf.view_number(), - justify_qc: leaf.justify_qc(), - upgrade_certificate: None, - view_change_evidence: None, - next_drb_result: None, - next_epoch_justify_qc: None, - }, - with_epoch: false, - }; + let quorum_proposal: QuorumProposalWrapper = QuorumProposal2:: { + block_header: leaf.block_header().clone(), + view_number: leaf.view_number(), + justify_qc: leaf.justify_qc(), + upgrade_certificate: None, + view_change_evidence: None, + next_drb_result: None, + next_epoch_justify_qc: None, + } + .into(); let quorum_proposal_signature = BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum 
proposal"); From a84549ddf53959b2c781c82d8558bb604bc7d86d Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 6 Feb 2025 21:32:46 +0500 Subject: [PATCH 033/120] fix leaf from proposal --- sequencer/src/api/sql.rs | 7 ++++--- sequencer/src/persistence/sql.rs | 8 ++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index d5dcb5039e..a25ad5e3c9 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -21,7 +21,7 @@ use hotshot_query_service::{ Resolvable, }; use hotshot_types::{ - data::{QuorumProposal2, ViewNumber}, + data::{QuorumProposal2, QuorumProposalWrapper, ViewNumber}, message::Proposal, traits::node_implementation::ConsensusTime, }; @@ -449,8 +449,9 @@ where .bind(param) .fetch_one(tx.as_mut()) .await?; - let proposal: Proposal> = bincode::deserialize(&data)?; - Ok(Leaf2::from_quorum_proposal(&proposal.data.into())) + let proposal: Proposal> = + bincode::deserialize(&data)?; + Ok(Leaf2::from_quorum_proposal(&proposal.data)) } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 31268c4da2..c24a82dafe 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -31,8 +31,8 @@ use hotshot_query_service::{ use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, - VidDisperseShare, VidDisperseShare2, + DaProposal, DaProposal2, QuorumProposal, QuorumProposalWrapper, VidDisperseShare, + VidDisperseShare2, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -1972,12 +1972,12 @@ async fn fetch_leaf_from_proposals( return Ok(None); }; - let proposal: Proposal> = + let proposal: Proposal> = bincode::deserialize(&proposal_bytes).context("deserializing quorum proposal")?; let qc: QuorumCertificate2 = bincode::deserialize(&qc_bytes).context("deserializing quorum certificate")?; - let leaf = Leaf2::from_quorum_proposal(&proposal.data.into()); + let leaf = Leaf2::from_quorum_proposal(&proposal.data); Ok(Some((leaf, qc))) } From a6df3db6c6580e6566ca0f2e4fb1ff5a233063ac Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 6 Feb 2025 21:33:40 +0500 Subject: [PATCH 034/120] fix import --- sequencer/src/api/sql.rs | 2 +- sequencer/src/persistence/sql.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index a25ad5e3c9..f08bd52eb2 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -21,7 +21,7 @@ use hotshot_query_service::{ Resolvable, }; use hotshot_types::{ - data::{QuorumProposal2, QuorumProposalWrapper, ViewNumber}, + data::{QuorumProposalWrapper, ViewNumber}, message::Proposal, traits::node_implementation::ConsensusTime, }; diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index c24a82dafe..6ed5170063 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -2042,7 +2042,7 @@ mod test { use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::EpochNumber, + data::{EpochNumber, QuorumProposal2}, message::convert_proposal, simple_certificate::QuorumCertificate, simple_vote::QuorumData, From aa31ce96e6257fba6e925acb7fbac72a4e7fbdf4 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 6 Feb 2025 16:40:56 -0300 Subject: [PATCH 035/120] TODO questions --- sequencer/src/genesis.rs | 4 +++- 
1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index aa247ce614..0b5d5e3526 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -83,6 +83,7 @@ impl Genesis { } impl Genesis { + // TODO `validate_stake_table_contract` and wrapper `validate_contracts` pub async fn validate_fee_contract(&self, l1_rpc_url: Url) -> anyhow::Result<()> { let l1 = L1Client::new(l1_rpc_url); @@ -100,7 +101,8 @@ impl Genesis { // now iterate over each upgrade type and validate the fee contract if it exists for (version, upgrade) in &self.upgrades { let chain_config = &upgrade.upgrade_type.chain_config(); - + // Is this not an error case? Isn't a chain config a + // requirement? At least for most versions? if chain_config.is_none() { continue; } From a4e0b58c159bfc2e11f6047f78b5032b457a7482 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 6 Feb 2025 16:41:12 -0300 Subject: [PATCH 036/120] add backtrace to justfile disabled --- justfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/justfile b/justfile index 1e0054356f..a8f72c972b 100644 --- a/justfile +++ b/justfile @@ -1,3 +1,5 @@ +# export RUST_BACKTRACE := "1" + default: just --list From 7203717a7d4b97c819aa18a26fa8e844bdef5cb0 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 6 Feb 2025 16:41:51 -0300 Subject: [PATCH 037/120] TODO --- sequencer/src/bin/espresso-bridge.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 2ae2c02632..e822aff838 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -231,6 +231,8 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { } }; + // TODO this appears to be broken. We often hit the `else` block + // when the builder was in fact funded. // Confirm that the Espresso balance has increased. 
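[Editor's note: the TODO above flags a race rather than a logic error — the confirmation below reads the balance at a fixed `espresso_block`, so a deposit credited one block later still trips the `else` branch. A purely hypothetical hardening sketch, not part of this patch; `initial_balance` stands in for a snapshot taken before the deposit, and the retry bounds are made up:]

    // Hypothetical: re-query the latest balance (block = None) with a short
    // retry loop before concluding the deposit failed. `espresso` and `l1`
    // are the clients already in scope in `deposit`.
    let mut final_balance = espresso.get_espresso_balance(l1.address(), None).await?;
    for _ in 0..10 {
        if final_balance > initial_balance {
            break;
        }
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        final_balance = espresso.get_espresso_balance(l1.address(), None).await?;
    }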
let final_balance = espresso .get_espresso_balance(l1.address(), Some(espresso_block)) From 2f28e000dcdf864c9aad4bbe76523b431a7835e1 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 6 Feb 2025 16:42:00 -0300 Subject: [PATCH 038/120] comment --- sequencer/src/run.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 7bdc0428f9..be4cfc61f2 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -43,6 +43,7 @@ pub async fn main() -> anyhow::Result<()> { genesis, modules, opt, + // Specifying V0_0 disables upgrades SequencerVersions::::new(), ) .await From 946cd789d6b81ccc14975319cd2b75c1c2f2b959 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Feb 2025 02:05:56 +0500 Subject: [PATCH 039/120] fix test_fetching_providers --- sequencer/src/persistence/sql.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 6ed5170063..8cf67b05da 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -2161,7 +2161,7 @@ mod test { .unwrap() .clone(); - let quorum_proposal: QuorumProposalWrapper = QuorumProposal2:: { + let mut quorum_proposal: QuorumProposalWrapper = QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: leaf.view_number(), justify_qc: leaf.justify_qc(), @@ -2171,6 +2171,11 @@ mod test { next_epoch_justify_qc: None, } .into(); + + // Genesis leaf with_epoch returns false + // `QuorumProposal2` -> `QuorumProposalWrapper` returns true + // so we overwrite it with false here + quorum_proposal.with_epoch = leaf.with_epoch; let quorum_proposal_signature = BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) .expect("Failed to sign quorum proposal"); From 270bf0b7bdffb0ccd9e8d64dc260b71e73fe4f1e Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Feb 2025 02:32:47 +0500 Subject: [PATCH 040/120] use TestVersions from hotshot-example-types --- Cargo.lock | 3 ++- node-metrics/Cargo.toml | 3 ++- node-metrics/src/service/client_state/mod.rs | 8 +++++--- node-metrics/src/service/data_state/mod.rs | 5 +++-- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c80402f484..fb271e830f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "Inflector" @@ -7584,6 +7584,7 @@ dependencies = [ "espresso-types", "futures", "hotshot", + "hotshot-example-types", "hotshot-query-service", "hotshot-stake-table", "hotshot-types", diff --git a/node-metrics/Cargo.toml b/node-metrics/Cargo.toml index eb441ca658..960bcc06ee 100644 --- a/node-metrics/Cargo.toml +++ b/node-metrics/Cargo.toml @@ -21,7 +21,8 @@ clap = { workspace = true } espresso-types = { path = "../types" } futures = { workspace = true } hotshot = { workspace = true } -hotshot-query-service = { workspace = true } +hotshot-example-types = { workspace = true } +hotshot-query-service = { workspace = true} hotshot-stake-table = { workspace = true } tokio = { workspace = true } diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index f9324ab90d..1ca1415d57 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -1201,7 +1201,7 @@ pub mod tests { channel::mpsc::{self, Sender}, SinkExt, StreamExt, }; - use hotshot_query_service::testing::mocks::MockVersions; + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::{ @@ -1370,13 +1370,15 @@ pub mod tests { #[tokio::test(flavor = "multi_thread")] #[cfg(feature = "testing")] async fn test_process_client_handling_stream_request_latest_blocks_snapshot() { + use hotshot_example_types::node_types::TestVersions; + use super::clone_block_detail; use crate::service::data_state::create_block_detail_from_leaf; let (_, _, _, mut data_state) = create_test_data_state(); let client_thread_state = Arc::new(RwLock::new(create_test_client_thread_state())); let leaf_1 = - Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let block_1 = create_block_detail_from_leaf(&leaf_1); data_state.add_latest_block(clone_block_detail(&block_1)); @@ -1617,7 +1619,7 @@ pub mod tests { // send a new leaf let leaf = - Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let expected_block = create_block_detail_from_leaf(&leaf); let arc_expected_block = Arc::new(expected_block); diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index c88807b230..ae820f4e5d 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -572,7 +572,8 @@ mod tests { v0_99::ChainConfig, BlockMerkleTree, FeeMerkleTree, Leaf2, NodeState, ValidatedState, }; use futures::{channel::mpsc, SinkExt, StreamExt}; - use hotshot_query_service::testing::mocks::MockVersions; + + use hotshot_example_types::node_types::TestVersions; use hotshot_types::{signature_key::BLSPubKey, traits::signature_key::SignatureKey}; use std::{sync::Arc, time::Duration}; use tokio::time::timeout; @@ -628,7 +629,7 @@ mod tests { }; let instance_state = NodeState::mock(); - let sample_leaf = Leaf2::genesis::(&validated_state, &instance_state).await; + let sample_leaf = Leaf2::genesis::(&validated_state, &instance_state).await; let mut leaf_sender = leaf_sender; // We should be able to send a leaf without issue From f0e9b41eb0f2bd343bdd3e347d2a5670ef7ad776 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Feb 2025 16:52:07 +0500 Subject: [PATCH 041/120] fix 
migration completed check --- .../src/availability/query_data.rs | 4 +- .../src/data_source/storage/sql.rs | 25 ++-- sequencer/src/persistence/sql.rs | 130 +++++++++--------- 3 files changed, 79 insertions(+), 80 deletions(-) diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index 848d72f8ea..5c94e2f83f 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -325,7 +325,9 @@ impl BlockQueryData { where Payload: QueryablePayload, { - let leaf = Leaf::::genesis(validated_state, instance_state).await; + let leaf: Leaf2<_> = Leaf::::genesis(validated_state, instance_state) + .await + .into(); Self::new(leaf.block_header().clone(), leaf.block_payload().unwrap()) } diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index 96ad43a728..60c8ad2383 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -813,22 +813,24 @@ impl SqlStorage { pub async fn migrate_types(&self) -> anyhow::Result<()> { let mut offset = 0; let limit = 10000; + let mut tx = self.read().await.map_err(|err| QueryError::Error { + message: err.to_string(), + })?; + + let (is_migration_completed,) = + query_as::<(bool,)>("SELECT completed from leaf_migration LIMIT 1 ") + .fetch_one(tx.as_mut()) + .await?; + if is_migration_completed { + tracing::info!("leaf1 to leaf2 migration already completed"); + return Ok(()); + } loop { let mut tx = self.read().await.map_err(|err| QueryError::Error { message: err.to_string(), })?; - let (is_migration_completed,) = - query_as::<(bool,)>("SELECT completed from leaf_migration LIMIT 1 ") - .fetch_one(tx.as_mut()) - .await?; - - if is_migration_completed { - tracing::info!("leaf1 to leaf2 migration already completed"); - return Ok(()); - } - let rows = QueryBuilder::default() .query(&format!( "SELECT leaf, qc FROM leaf ORDER BY height LIMIT {} OFFSET {}", @@ -840,8 +842,7 @@ impl SqlStorage { drop(tx); if rows.is_empty() { - tracing::info!("no leaf1 rows found"); - return Ok(()); + break; } let mut leaf_rows = Vec::new(); diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 8cf67b05da..ed8dad55ec 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -1293,22 +1293,21 @@ impl SequencerPersistence for Persistence { async fn migrate_anchor_leaf(&self) -> anyhow::Result<()> { let batch_size: i64 = 1000; let mut offset: i64 = 0; + let mut tx = self.db.read().await?; - loop { - let mut tx = self.db.read().await?; - - let (is_completed,) = query_as::<(bool,)>( - "SELECT completed from epoch_migration WHERE table_name = 'anchor_leaf'", - ) - .fetch_one(tx.as_mut()) - .await?; - - if is_completed { - tracing::info!("anchor leaf migration already done"); + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'anchor_leaf'", + ) + .fetch_one(tx.as_mut()) + .await?; - return Ok(()); - } + if is_completed { + tracing::info!("anchor leaf migration already done"); + return Ok(()); + } + loop { + let mut tx = self.db.read().await?; let rows = query("SELECT view, leaf, qc FROM anchor_leaf ORDER BY view LIMIT $1 OFFSET $2") .bind(batch_size) @@ -1374,22 +1373,22 @@ impl SequencerPersistence for Persistence { async fn migrate_da_proposals(&self) -> anyhow::Result<()> { let batch_size: i64 = 1000; let mut offset: i64 = 
0; + let mut tx = self.db.read().await?; - loop { - let mut tx = self.db.read().await?; - - let (is_completed,) = query_as::<(bool,)>( - "SELECT completed from epoch_migration WHERE table_name = 'da_proposal'", - ) - .fetch_one(tx.as_mut()) - .await?; + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'da_proposal'", + ) + .fetch_one(tx.as_mut()) + .await?; - if is_completed { - tracing::info!("da proposals migration already done"); + if is_completed { + tracing::info!("da proposals migration already done"); - return Ok(()); - } + return Ok(()); + } + loop { + let mut tx = self.db.read().await?; let rows = query( "SELECT payload_hash, data FROM da_proposal ORDER BY view LIMIT $1 OFFSET $2", ) @@ -1454,22 +1453,21 @@ impl SequencerPersistence for Persistence { async fn migrate_vid_shares(&self) -> anyhow::Result<()> { let batch_size: i64 = 1000; let mut offset: i64 = 0; + let mut tx = self.db.read().await?; - loop { - let mut tx = self.db.read().await?; - - let (is_completed,) = query_as::<(bool,)>( - "SELECT completed from epoch_migration WHERE table_name = 'vid_share'", - ) - .fetch_one(tx.as_mut()) - .await?; - - if is_completed { - tracing::info!("vid_share migration already done"); + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'vid_share'", + ) + .fetch_one(tx.as_mut()) + .await?; - return Ok(()); - } + if is_completed { + tracing::info!("vid_share migration already done"); + return Ok(()); + } + loop { + let mut tx = self.db.read().await?; let rows = query("SELECT payload_hash, data FROM vid_share ORDER BY view LIMIT $1 OFFSET $2") .bind(batch_size) @@ -1581,22 +1579,21 @@ impl SequencerPersistence for Persistence { async fn migrate_quorum_proposals(&self) -> anyhow::Result<()> { let batch_size: i64 = 1000; let mut offset: i64 = 0; + let mut tx = self.db.read().await?; - loop { - let mut tx = self.db.read().await?; - - let (is_completed,) = query_as::<(bool,)>( - "SELECT completed from epoch_migration WHERE table_name = 'quorum_proposals'", - ) - .fetch_one(tx.as_mut()) - .await?; - - if is_completed { - tracing::info!("quorum proposals migration already done"); + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'quorum_proposals'", + ) + .fetch_one(tx.as_mut()) + .await?; - return Ok(()); - } + if is_completed { + tracing::info!("quorum proposals migration already done"); + return Ok(()); + } + loop { + let mut tx = self.db.read().await?; let rows = query("SELECT view, leaf_hash, data FROM quorum_proposals ORDER BY view LIMIT $1 OFFSET $2") .bind(batch_size) @@ -1662,22 +1659,21 @@ impl SequencerPersistence for Persistence { async fn migrate_quorum_certificates(&self) -> anyhow::Result<()> { let batch_size: i64 = 1000; let mut offset: i64 = 0; + let mut tx = self.db.read().await?; - loop { - let mut tx = self.db.read().await?; - - let (is_completed,) = query_as::<(bool,)>( - "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificate'", - ) - .fetch_one(tx.as_mut()) - .await?; - - if is_completed { - tracing::info!(" quorum certificates migration already done"); + let (is_completed,) = query_as::<(bool,)>( + "SELECT completed from epoch_migration WHERE table_name = 'quorum_certificate'", + ) + .fetch_one(tx.as_mut()) + .await?; - return Ok(()); - } + if is_completed { + tracing::info!(" quorum certificates migration already done"); + return Ok(()); + } + loop { + let mut tx = self.db.read().await?; let 
rows = query("SELECT view, leaf_hash, data FROM quorum_certificate ORDER BY view LIMIT $1 OFFSET $2") .bind(batch_size) @@ -1855,7 +1851,7 @@ impl Provider for Persistence { Ok(Some((bytes,))) => bytes, Ok(None) => return None, Err(err) => { - tracing::warn!("error loading VID share: {err:#}"); + tracing::error!("error loading VID share: {err:#}"); return None; } }; @@ -1864,7 +1860,7 @@ impl Provider for Persistence { match bincode::deserialize(&bytes) { Ok(share) => share, Err(err) => { - tracing::warn!("error decoding VID share: {err:#}"); + tracing::error!("error decoding VID share: {err:#}"); return None; } }; @@ -1904,7 +1900,7 @@ impl Provider for Persistence { { Ok(proposal) => proposal, Err(err) => { - tracing::warn!("error decoding DA proposal: {err:#}"); + tracing::error!("error decoding DA proposal: {err:#}"); return None; } }; From 98a6969d2c3feb6fa26eae13e49c7f70753851a1 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 7 Feb 2025 15:44:59 -0300 Subject: [PATCH 042/120] version features --- Cargo.toml | 16 ---------------- sequencer/Cargo.toml | 4 ++++ sequencer/src/run.rs | 22 +++++++++++++++++++++- tests/smoke.rs | 1 + 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c562240ca3..d4a38723f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -180,19 +180,3 @@ paste = "1.0" rand = "0.8.5" time = "0.3" trait-set = "0.3.0" - -[profile.dev] -# No optimizations -opt-level = 0 -# Skip compiling the debug information. -debug = false -# Skip linking symbols. -strip = true -[profile.test] -opt-level = 1 -[profile.test.package.tests] -opt-level = 0 -[profile.test.package.client] -opt-level = 0 -[profile.test.package.hotshot-state-prover] -opt-level = 3 diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 65d1a1b49b..3a49f31185 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [features] +default = ["pos"] testing = [ "hotshot-testing", "marketplace-builder-core", @@ -15,6 +16,9 @@ testing = [ ] benchmarking = [] embedded-db = ["hotshot-query-service/embedded-db"] +fee = [] +pos = [] +marketplace = [] [[bin]] name = "espresso-dev-node" diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index c8862b1a8b..d6efcf2314 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -38,6 +38,17 @@ pub async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { + #[cfg(all(feature = "fee", feature = "pos"))] + (FeeVersion::VERSION, EpochVersion::VERSION) => { + run( + genesis, + modules, + opt, + SequencerVersions::::new(), + ) + .await + } + #[cfg(feature = "pos")] (EpochVersion::VERSION, _) => { run( genesis, @@ -48,7 +59,16 @@ pub async fn main() -> anyhow::Result<()> { ) .await } - + #[cfg(all(feature = "fee", feature = "marketplace"))] + (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { + run( + genesis, + modules, + opt, + SequencerVersions::::new(), + ) + .await + } _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
), diff --git a/tests/smoke.rs b/tests/smoke.rs index d154fc03dd..69ea3be272 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -30,6 +30,7 @@ async fn test_smoke() -> Result<()> { let mut state_retries = 0; let mut txn_retries = 0; while (sub.next().await).is_some() { + dbg!("next"); let new = testing.test_state().await; println!("New State:{}", new); From 711c7b9300df4280edc38a416b65b68db9554f21 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 7 Feb 2025 17:30:33 -0300 Subject: [PATCH 043/120] I think build jet has more space (lets find out) --- .github/workflows/cargo-features.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cargo-features.yml b/.github/workflows/cargo-features.yml index 572cbd24c0..2b39663a93 100644 --- a/.github/workflows/cargo-features.yml +++ b/.github/workflows/cargo-features.yml @@ -19,7 +19,7 @@ concurrency: jobs: cargo-features: - runs-on: ubuntu-latest + runs-on: buildjet-8vcpu-ubuntu-2204 steps: - uses: taiki-e/install-action@cargo-hack From dc25ada5892fb3254ac2343a9e164874b844e8e4 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 7 Feb 2025 17:32:52 -0300 Subject: [PATCH 044/120] Revert "I think build jet has more space (lets find out)" This reverts commit 711c7b9300df4280edc38a416b65b68db9554f21. --- .github/workflows/cargo-features.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cargo-features.yml b/.github/workflows/cargo-features.yml index 2b39663a93..572cbd24c0 100644 --- a/.github/workflows/cargo-features.yml +++ b/.github/workflows/cargo-features.yml @@ -19,7 +19,7 @@ concurrency: jobs: cargo-features: - runs-on: buildjet-8vcpu-ubuntu-2204 + runs-on: ubuntu-latest steps: - uses: taiki-e/install-action@cargo-hack From e951d1d3a1769f0cb628c53be878384c02e91145 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 7 Feb 2025 18:36:57 -0300 Subject: [PATCH 045/120] test epoch test --- sequencer/src/api.rs | 87 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 5165b9f5c7..063822a16f 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1605,6 +1605,7 @@ mod test { persistence::no_storage, testing::{TestConfig, TestConfigBuilder}, }; + use espresso_types::EpochVersion; #[tokio::test(flavor = "multi_thread")] async fn test_healthcheck() { @@ -2246,6 +2247,36 @@ mod test { test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; } + #[tokio::test(flavor = "multi_thread")] + async fn test_pos_upgrade_view_based() { + setup_test(); + + let mut upgrades = std::collections::BTreeMap::new(); + type MySequencerVersions = SequencerVersions; + + let mode = UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }); + + let upgrade_type = UpgradeType::Marketplace { + chain_config: ChainConfig { + max_block_size: 400.into(), + base_fee: 2.into(), + bid_recipient: Some(Default::default()), + ..Default::default() + }, + }; + + upgrades.insert( + ::Upgrade::VERSION, + Upgrade { mode, upgrade_type }, + ); + test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; + } + async fn test_upgrade_helper( upgrades: BTreeMap, bind_version: MockSeqVersions, @@ -2570,6 +2601,62 @@ mod test { let mut receive_count = 0; loop { let event = subscribed_events.next().await.unwrap(); + dbg!(&event); + tracing::info!( + "Received event in hotshot event streaming Client 1: {:?}", + event + ); + receive_count += 1; + if 
receive_count > total_count { + tracing::info!("Client Received at least desired events, exiting loop"); + break; + } + } + assert_eq!(receive_count, total_count + 1); + } + // TODO instead of as above, listen to events until we get at least to view 3 + // maybe put in the slow category + #[tokio::test(flavor = "multi_thread")] + async fn test_hotshot_event_streaming_epoch_progression() { + setup_test(); + + let hotshot_event_streaming_port = + pick_unused_port().expect("No ports free for hotshot event streaming"); + let query_service_port = pick_unused_port().expect("No ports free for query service"); + + let url = format!("http://localhost:{hotshot_event_streaming_port}") + .parse() + .unwrap(); + + let hotshot_events = HotshotEvents { + events_service_port: hotshot_event_streaming_port, + }; + + let client: Client = Client::new(url); + + let options = Options::with_port(query_service_port).hotshot_events(hotshot_events); + + let anvil = Anvil::new().spawn(); + let l1 = anvil.endpoint().parse().unwrap(); + let network_config = TestConfigBuilder::default().l1_url(l1).build(); + let config = TestNetworkConfigBuilder::default() + .api_config(options) + .network_config(network_config) + .build(); + let _network = TestNetwork::new(config, MockSequencerVersions::new()).await; + + let mut subscribed_events = client + .socket("hotshot-events/events") + .subscribe::>() + .await + .unwrap(); + + let total_count = 5; + // wait for these events to receive on client 1 + let mut receive_count = 0; + loop { + let event = subscribed_events.next().await.unwrap(); + dbg!(&event); tracing::info!( "Received event in hotshot event streaming Client 1: {:?}", event From f0f9f494a39ebab64e86a2dea7ece71d974407a8 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Feb 2025 14:59:07 +0500 Subject: [PATCH 046/120] call v2 methods --- types/src/v0/traits.rs | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index 82be9ffa1e..e637be1a2c 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -782,6 +782,13 @@ impl Storage for Arc

{ (**self).append_vid(proposal).await } + async fn append_vid2( + &self, + proposal: &Proposal<VidDisperseShare2<SeqTypes>>, + ) -> anyhow::Result<()> { + (**self).append_vid2(proposal).await + } + async fn append_da( &self, proposal: &Proposal<DaProposal<SeqTypes>>, @@ -790,6 +797,14 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P>

{ (**self).append_da(proposal, vid_commit).await } + async fn append_da2( + &self, + proposal: &Proposal<DaProposal2<SeqTypes>>, + vid_commit: <VidSchemeType as VidScheme>::Commit, + ) -> anyhow::Result<()> { + (**self).append_da2(proposal, vid_commit).await + } + async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()> { (**self).record_action(view, action).await } @@ -825,6 +840,22 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P>

{ .await } + async fn append_proposal2( + &self, + proposal: &Proposal<QuorumProposal2<SeqTypes>>, + ) -> anyhow::Result<()> { + // TODO: this is a bug in hotshot which makes with_epoch = true + // when converting from qp2 to qp wrapper + let mut proposal_qp_wrapper: Proposal<QuorumProposalWrapper<SeqTypes>> = + convert_proposal(proposal.clone()); + proposal_qp_wrapper.data.with_epoch = false; + (**self).append_quorum_proposal2(&proposal_qp_wrapper).await + } + + async fn update_high_qc2(&self, high_qc: QuorumCertificate2<SeqTypes>) -> anyhow::Result<()> { + self.update_high_qc2(high_qc).await + } + async fn update_decided_upgrade_certificate( &self, decided_upgrade_certificate: Option<UpgradeCertificate<SeqTypes>>, @@ -833,6 +864,14 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P>

{ .store_upgrade_certificate(decided_upgrade_certificate) .await } + + async fn update_undecided_state2( + &self, + leaves: CommitmentMap, + state: BTreeMap>, + ) -> anyhow::Result<()> { + (**self).update_undecided_state2(leaves, state).await + } } /// Data that can be deserialized from a subslice of namespace payload bytes. From 26fbdad0b3d28ea9c52e3d387bb49cc2ad98f61d Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Feb 2025 16:40:52 +0500 Subject: [PATCH 047/120] fix VID errors --- .../src/testing/basic.rs | 2 +- .../src/availability/query_data.rs | 2 +- .../src/data_source/storage/sql.rs | 18 +++- .../src/data_source/update.rs | 12 +-- sequencer/src/api.rs | 8 +- sequencer/src/persistence.rs | 32 ++++--- sequencer/src/persistence/fs.rs | 43 +++++---- sequencer/src/persistence/no_storage.rs | 9 +- sequencer/src/persistence/sql.rs | 92 ++++++++++--------- types/src/v0/traits.rs | 42 +++++++-- 10 files changed, 156 insertions(+), 104 deletions(-) diff --git a/hotshot-builder-core-refactored/src/testing/basic.rs b/hotshot-builder-core-refactored/src/testing/basic.rs index 3fa917d50e..1868077b1c 100644 --- a/hotshot-builder-core-refactored/src/testing/basic.rs +++ b/hotshot-builder-core-refactored/src/testing/basic.rs @@ -7,7 +7,7 @@ use hotshot_example_types::node_types::{TestTypes, TestVersions}; use hotshot_example_types::state_types::{TestInstanceState, TestValidatedState}; use hotshot_types::data::{Leaf2, QuorumProposal2, QuorumProposalWrapper, ViewNumber}; use hotshot_types::event::LeafInfo; -use hotshot_types::simple_certificate::QuorumCertificate; +use hotshot_types::simple_certificate::QuorumCertificate2; use hotshot_types::traits::block_contents::BlockHeader; use hotshot_types::traits::node_implementation::{ConsensusTime, NodeType}; use hotshot_types::utils::BuilderCommitment; diff --git a/hotshot-query-service/src/availability/query_data.rs b/hotshot-query-service/src/availability/query_data.rs index eb775ceb32..1e464bcfe1 100644 --- a/hotshot-query-service/src/availability/query_data.rs +++ b/hotshot-query-service/src/availability/query_data.rs @@ -325,7 +325,7 @@ impl BlockQueryData { where Payload: QueryablePayload, { - let leaf = Leaf::::genesis(validated_state, instance_state).await; + let leaf = Leaf2::::genesis::(validated_state, instance_state).await; Self::new(leaf.block_header().clone(), leaf.block_payload().unwrap()) } diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index 60c8ad2383..ec7875808d 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -1223,7 +1223,7 @@ mod test { node_types::TestVersions, state_types::{TestInstanceState, TestValidatedState}, }; - use hotshot_types::traits::EncodeBytes; + use hotshot_types::traits::{node_implementation::Versions, EncodeBytes}; use hotshot_types::{ data::{QuorumProposal, ViewNumber}, simple_vote::QuorumData, @@ -1237,6 +1237,7 @@ mod test { }; use std::time::Duration; use tokio::time::sleep; + use vbs::version::StaticVersionType; use super::{testing::TmpDb, *}; use crate::{ @@ -1244,7 +1245,7 @@ mod test { data_source::storage::{pruning::PrunedHeightStorage, UpdateAvailabilityStorage}, merklized_state::{MerklizedState, UpdateStateData}, testing::{ - mocks::{MockHeader, MockMerkleTree, MockPayload, MockTypes}, + mocks::{MockHeader, MockMerkleTree, MockPayload, MockTypes, MockVersions}, setup_test, }, }; @@ -1647,7 +1648,11 @@ mod test { >::builder_commitment(&payload, 
&metadata); let payload_bytes = payload.encode(); - let payload_commitment = vid_commitment(&payload_bytes, 4); + let payload_commitment = vid_commitment::( + &payload_bytes, + 4, + ::Base::VERSION, + ); let mut block_header = >::genesis( &instance_state, @@ -1679,7 +1684,12 @@ mod test { }; let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - leaf.fill_block_payload(payload, 4).unwrap(); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); qc.data.leaf_commit = as Committable>::commit(&leaf); let height = leaf.height() as i64; diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 85ce708665..314567d49f 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -22,7 +22,6 @@ use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::{data::VidDisperseShare, event::LeafInfo}; use hotshot_types::{ data::Leaf2, traits::{ @@ -31,6 +30,7 @@ use hotshot_types::{ }, vid::advz_scheme, }; +use hotshot_types::{data::VidDisperseShare, event::LeafInfo}; use jf_vid::VidScheme; use std::iter::once; @@ -107,7 +107,7 @@ where return Err(leaf2.block_header().block_number()); } }; - let block_data = leaf + let block_data = leaf2 .block_payload() .map(|payload| BlockQueryData::new(leaf2.block_header().clone(), payload)); if block_data.is_none() { @@ -117,25 +117,25 @@ where let (vid_common, vid_share) = match vid_share { Some(VidDisperseShare::V0(share)) => ( Some(VidCommonQueryData::new( - leaf.block_header().clone(), + leaf2.block_header().clone(), share.common.clone(), )), Some(share.share.clone()), ), Some(VidDisperseShare::V1(share)) => ( Some(VidCommonQueryData::new( - leaf.block_header().clone(), + leaf2.block_header().clone(), share.common.clone(), )), Some(share.share.clone()), ), None => { - if leaf.view_number().u64() == 0 { + if leaf2.view_number().u64() == 0 { // HotShot does not run VID in consensus for the genesis block. In this case, // the block payload is guaranteed to always be empty, so VID isn't really // necessary. But for consistency, we will still store the VID dispersal data, // computing it ourselves based on the well-known genesis VID commitment. - match genesis_vid(&leaf) { + match genesis_vid(&leaf2) { Ok((common, share)) => (Some(common), Some(share)), Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 5f1f4cf2b8..750eae170e 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1106,8 +1106,10 @@ mod api_tests { AvailabilityDataSource, BlockQueryData, VidCommonQueryData, }; - use hotshot_types::data::{DaProposal2, EpochNumber, VidDisperseShare2}; + use hotshot_types::data::vid_disperse::VidDisperseShare2; + use hotshot_types::data::{DaProposal2, EpochNumber, VidDisperseShare}; use hotshot_types::simple_certificate::QuorumCertificate2; + use hotshot_types::vid::advz_scheme; use hotshot_types::{ data::{QuorumProposal2, QuorumProposalWrapper}, event::LeafInfo, @@ -1339,7 +1341,7 @@ mod api_tests { .unwrap(); // Include VID information for each leaf. 
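[Editor's note: from this patch on, shares are persisted wrapped in the versioned `VidDisperseShare` enum instead of as bare `VidDisperseShare2` values, and view numbers are updated through the enum's setter rather than by direct field access. In outline (fields elided; `SeqTypes` as in the surrounding tests):]

    // Before: a bare v2 share, mutated field-by-field.
    let mut vid = VidDisperseShare2::<SeqTypes> { /* ... */ };
    vid.view_number = ViewNumber::new(1);

    // After: the same payload behind the versioned enum.
    let mut vid: VidDisperseShare<SeqTypes> =
        VidDisperseShare2::<SeqTypes> { /* ... */ }.into();
    vid.set_view_number(ViewNumber::new(1));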
- let share = VidDisperseShare2:: { + let share = VidDisperseShare::V1(VidDisperseShare2:: { view_number: leaf.view_number(), payload_commitment, share: disperse.shares[0].clone(), @@ -1348,7 +1350,7 @@ mod api_tests { epoch: Some(EpochNumber::new(0)), target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: None, - }; + }); persistence .append_vid2(&share.to_proposal(&privkey).unwrap()) .await diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs index 049bb4554c..2cde72d5ae 100644 --- a/sequencer/src/persistence.rs +++ b/sequencer/src/persistence.rs @@ -58,8 +58,8 @@ mod persistence_tests { use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::{ data::{ - DaProposal2, EpochNumber, QuorumProposal2, QuorumProposalWrapper, VidDisperseShare2, - ViewNumber, + vid_disperse::VidDisperseShare2, DaProposal2, EpochNumber, QuorumProposal2, + QuorumProposalWrapper, VidDisperseShare, ViewNumber, }, event::{EventType, HotShotAction, LeafInfo}, message::{Proposal, UpgradeLock}, @@ -73,6 +73,7 @@ mod persistence_tests { EncodeBytes, }, vid::advz_scheme, + vote::HasViewNumber, }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; @@ -185,7 +186,7 @@ mod persistence_tests { .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); let signature = PubKey::sign(&privkey, &[]).unwrap(); - let mut vid = VidDisperseShare2:: { + let mut vid: VidDisperseShare<_> = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment: Default::default(), share: disperse.shares[0].clone(), @@ -194,7 +195,8 @@ mod persistence_tests { epoch: Some(EpochNumber::new(0)), target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: None, - }; + } + .into(); let mut quorum_proposal = Proposal { data: QuorumProposalWrapper:: { proposal: QuorumProposal2:: { @@ -225,33 +227,33 @@ mod persistence_tests { Some(vid_share0.clone()) ); - vid.view_number = ViewNumber::new(1); + vid.set_view_number(ViewNumber::new(1)); let vid_share1 = vid.clone().to_proposal(&privkey).unwrap().clone(); storage.append_vid2(&vid_share1).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), + storage.load_vid_share(vid.view_number()).await.unwrap(), Some(vid_share1.clone()) ); - vid.view_number = ViewNumber::new(2); + vid.set_view_number(ViewNumber::new(2)); let vid_share2 = vid.clone().to_proposal(&privkey).unwrap().clone(); storage.append_vid2(&vid_share2).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), + storage.load_vid_share(vid.view_number()).await.unwrap(), Some(vid_share2.clone()) ); - vid.view_number = ViewNumber::new(3); + vid.set_view_number(ViewNumber::new(3)); let vid_share3 = vid.clone().to_proposal(&privkey).unwrap().clone(); storage.append_vid2(&vid_share3).await.unwrap(); assert_eq!( - storage.load_vid_share(vid.view_number).await.unwrap(), + storage.load_vid_share(vid.view_number()).await.unwrap(), Some(vid_share3.clone()) ); @@ -676,7 +678,7 @@ mod persistence_tests { .disperse(leaf_payload_bytes_arc.clone()) .unwrap(); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let mut vid = VidDisperseShare2:: { + let mut vid = VidDisperseShare::V1(VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment: Default::default(), share: disperse.shares[0].clone(), @@ -685,7 +687,7 @@ mod persistence_tests { epoch: Some(EpochNumber::new(0)), target_epoch: Some(EpochNumber::new(0)), data_epoch_payload_commitment: 
None, - } + }) .to_proposal(&privkey) .unwrap() .clone(); @@ -736,7 +738,7 @@ mod persistence_tests { let leaf = Leaf2::from_quorum_proposal(&quorum_proposal); qc.view_number = leaf.view_number(); qc.data.leaf_commit = Committable::commit(&leaf); - vid.data.view_number = leaf.view_number(); + vid.data.set_view_number(leaf.view_number()); da_proposal.data.view_number = leaf.view_number(); chain.push((leaf.clone(), qc.clone(), vid.clone(), da_proposal.clone())); } @@ -876,7 +878,7 @@ mod persistence_tests { .unwrap(); let payload_commitment = disperse.commit; let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = VidDisperseShare2:: { + let vid_share = VidDisperseShare::V1(VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, share: disperse.shares[0].clone(), @@ -885,7 +887,7 @@ mod persistence_tests { epoch: None, target_epoch: None, data_epoch_payload_commitment: None, - } + }) .to_proposal(&privkey) .unwrap() .clone(); diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 065c2d1f21..01fa772efd 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -9,8 +9,8 @@ use espresso_types::{ use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposalWrapper, VidDisperseShare, - VidDisperseShare2, + vid_disperse::ADVZDisperseShare, DaProposal, DaProposal2, EpochNumber, QuorumProposal, + QuorumProposalWrapper, VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -452,7 +452,7 @@ impl Inner { fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { let dir_path = self.vid2_dir_path(); let file_path = dir_path.join(view.u64().to_string()).with_extension("txt"); @@ -462,7 +462,7 @@ impl Inner { } let vid_share_bytes = fs::read(file_path)?; - let vid_share: Proposal> = + let vid_share: Proposal> = bincode::deserialize(&vid_share_bytes)?; Ok(Some(vid_share)) } @@ -649,7 +649,7 @@ impl SequencerPersistence for Persistence { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { self.inner.read().await.load_vid_share(view) } @@ -953,7 +953,7 @@ impl SequencerPersistence for Persistence { async fn append_vid2( &self, - proposal: &Proposal>, + proposal: &Proposal>, ) -> anyhow::Result<()> { let mut inner = self.inner.write().await; let view_number = proposal.data.view_number().u64(); @@ -1154,12 +1154,12 @@ impl SequencerPersistence for Persistence { let bytes = fs::read(&path).context(format!("reading vid share {}", path.display()))?; let proposal = - bincode::deserialize::>>(&bytes) + bincode::deserialize::>>(&bytes) .context(format!("parsing vid share {}", path.display()))?; let file_path = vid2_path.join(view.to_string()).with_extension("txt"); - let proposal2: Proposal> = + let proposal2: Proposal> = convert_proposal(proposal); inner.replace( @@ -1369,8 +1369,13 @@ mod test { use espresso_types::{NodeState, PubKey}; use hotshot::types::SignatureKey; use hotshot_example_types::node_types::TestVersions; + use hotshot_query_service::testing::mocks::MockVersions; use hotshot_types::data::QuorumProposal2; + use hotshot_types::traits::node_implementation::Versions; + use hotshot_types::vid::advz_scheme; use sequencer_utils::test_utils::setup_test; + use vbs::version::StaticVersionType; + use serde_json::json; use std::marker::PhantomData; @@ -1386,7 +1391,6 @@ mod test { 
simple_certificate::QuorumCertificate, simple_vote::QuorumData, traits::{block_contents::vid_commitment, EncodeBytes}, - vid::vid_scheme, }; use jf_vid::VidScheme; @@ -1518,7 +1522,11 @@ mod test { let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode(); - let payload_commitment = vid_commitment(&payload_bytes, 4); + let payload_commitment = vid_commitment::( + &payload_bytes, + 4, + ::Base::VERSION, + ); let block_header = Header::genesis( &instance_state, @@ -1558,7 +1566,12 @@ mod test { }; let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - leaf.fill_block_payload(payload, 4).unwrap(); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); let mut inner = storage.inner.write().await; @@ -1600,9 +1613,9 @@ mod test { .unwrap(); drop(inner); - let disperse = vid_scheme(4).disperse(payload_bytes.clone()).unwrap(); + let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap(); - let vid = VidDisperseShare:: { + let vid = ADVZDisperseShare:: { view_number: ViewNumber::new(i), payload_commitment: Default::default(), share: disperse.shares[0].clone(), @@ -1703,9 +1716,7 @@ mod test { let storage = Persistence::connect(&tmp).await; // Generate a couple of valid quorum proposals. - let leaf: Leaf2 = Leaf::genesis::(&Default::default(), &NodeState::mock()) - .await - .into(); + let leaf = Leaf2::genesis::(&Default::default(), &NodeState::mock()).await; let privkey = PubKey::generated_from_seed_indexed([0; 32], 1).1; let signature = PubKey::sign(&privkey, &[]).unwrap(); let mut quorum_proposal = Proposal { diff --git a/sequencer/src/persistence/no_storage.rs b/sequencer/src/persistence/no_storage.rs index dfe095bce0..573b3164ef 100644 --- a/sequencer/src/persistence/no_storage.rs +++ b/sequencer/src/persistence/no_storage.rs @@ -9,7 +9,10 @@ use espresso_types::{ }; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, DaProposal2, QuorumProposalWrapper, VidDisperseShare, VidDisperseShare2}, + data::{ + vid_disperse::ADVZDisperseShare, DaProposal, DaProposal2, EpochNumber, + QuorumProposalWrapper, VidDisperseShare, + }, event::{Event, EventType, HotShotAction, LeafInfo}, message::Proposal, simple_certificate::{NextEpochQuorumCertificate2, QuorumCertificate2, UpgradeCertificate}, @@ -104,7 +107,7 @@ impl SequencerPersistence for NoStorage { async fn load_vid_share( &self, _view: ViewNumber, - ) -> anyhow::Result>>> { + ) -> anyhow::Result>>> { Ok(None) } @@ -182,7 +185,7 @@ impl SequencerPersistence for NoStorage { async fn append_vid2( &self, - _proposal: &Proposal>, + _proposal: &Proposal>, ) -> anyhow::Result<()> { Ok(()) } diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index c01c2575df..8804c2007f 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -31,8 +31,8 @@ use hotshot_query_service::{ use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposalWrapper, VidDisperseShare, - VidDisperseShare2, + vid_disperse::ADVZDisperseShare, DaProposal, DaProposal2, EpochNumber, QuorumProposal, + QuorumProposalWrapper, VidDisperseShare, }, event::{Event, EventType, HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -583,24 +583,6 @@ pub struct Persistence { gc_opt: ConsensusPruningOptions, } -// TODO: clean up as part of VID migration -fn deserialize_vid_proposal_with_fallback( - bytes: &[u8], -) -> anyhow::Result>> { - bincode::deserialize(bytes).or_else(|err| 
{ - tracing::warn!("error decoding VID share: {err:#}"); - match bincode::deserialize::>>(bytes) { - Ok(proposal) => Ok(convert_proposal(proposal)), - Err(err2) => { - tracing::warn!("error decoding VID share fallback: {err2:#}"); - Err(anyhow::anyhow!( - "Both primary and fallback deserialization failed: {err:#}, {err2:#}" - )) - } - } - }) -} - impl Persistence { /// Ensure the `leaf_hash` column is populated for all existing quorum proposals. /// @@ -1508,8 +1490,8 @@ impl SequencerPersistence for Persistence { let data: Vec = row.try_get("data")?; let payload_hash: String = row.try_get("payload_hash")?; - let vid_share: VidDisperseShare = bincode::deserialize(&data)?; - let vid_share2: VidDisperseShare2 = vid_share.into(); + let vid_share: ADVZDisperseShare = bincode::deserialize(&data)?; + let vid_share2: VidDisperseShare = vid_share.into(); let view = vid_share2.view_number().u64() as i64; let data = bincode::serialize(&vid_share2)?; @@ -1791,10 +1773,11 @@ impl SequencerPersistence for Persistence { async fn append_vid2( &self, - proposal: &Proposal>, + proposal: &Proposal>, ) -> anyhow::Result<()> { - let view = proposal.data.view_number.u64(); - let payload_hash = proposal.data.payload_commitment; + let view = proposal.data.view_number().u64(); + + let payload_hash = proposal.data.payload_commitment(); let data_bytes = bincode::serialize(proposal).unwrap(); let mut tx = self.db.write().await?; @@ -1888,7 +1871,7 @@ impl Provider for Persistence { } }; - Some(share.data.common) + Some(share.data.vid_common_ref().clone()) } } @@ -2054,15 +2037,20 @@ mod test { use super::*; use crate::{persistence::testing::TestablePersistence, BLSPubKey, PubKey}; - use espresso_types::{traits::NullEventConsumer, Leaf, NodeState, ValidatedState}; + use committable::{Commitment, CommitmentBoundsArkless}; + use espresso_types::{traits::NullEventConsumer, Header, Leaf, NodeState, ValidatedState}; use futures::stream::TryStreamExt; use hotshot_example_types::node_types::TestVersions; use hotshot_types::{ - data::{EpochNumber, QuorumProposal2}, + data::{vid_disperse::VidDisperseShare2, EpochNumber, QuorumProposal2}, message::convert_proposal, simple_certificate::QuorumCertificate, - traits::{block_contents::vid_commitment, signature_key::SignatureKey, EncodeBytes}, - vid::vid_scheme, + simple_vote::QuorumData, + traits::{ + block_contents::vid_commitment, node_implementation::Versions, + signature_key::SignatureKey, EncodeBytes, + }, + vid::advz_scheme, }; use jf_vid::VidScheme; use sequencer_utils::test_utils::setup_test; @@ -2156,7 +2144,8 @@ mod test { let storage = Persistence::connect(&tmp).await; // Mock up some data. 
- let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); let disperse = advz_scheme(2) @@ -2164,7 +2153,7 @@ mod test { .unwrap(); let payload_commitment = disperse.commit; let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid_share = VidDisperseShare:: { + let vid_share = VidDisperseShare2:: { view_number: ViewNumber::new(0), payload_commitment, share: disperse.shares[0].clone(), @@ -2182,13 +2171,13 @@ mod test { proposal: QuorumProposal2:: { block_header: leaf.block_header().clone(), view_number: leaf.view_number(), - justify_qc: leaf.justify_qc().to_qc2(), + justify_qc: leaf.justify_qc(), upgrade_certificate: None, view_change_evidence: None, next_drb_result: None, next_epoch_justify_qc: None, + epoch: None, }, - with_epoch: false, }; let quorum_proposal_signature = BLSPubKey::sign(&privkey, &bincode::serialize(&quorum_proposal).unwrap()) @@ -2228,7 +2217,10 @@ mod test { .append_da2(&da_proposal, payload_commitment) .await .unwrap(); - storage.append_vid2(&vid_share).await.unwrap(); + storage + .append_vid2(&convert_proposal(vid_share.clone())) + .await + .unwrap(); storage .append_quorum_proposal2(&quorum_proposal) .await @@ -2286,7 +2278,8 @@ mod test { let data_view = ViewNumber::new(1); // Populate some data. - let leaf = Leaf::genesis(&ValidatedState::default(), &NodeState::mock()).await; + let leaf = + Leaf2::genesis::(&ValidatedState::default(), &NodeState::mock()).await; let leaf_payload = leaf.block_payload().unwrap(); let leaf_payload_bytes_arc = leaf_payload.encode(); @@ -2299,7 +2292,7 @@ mod test { ::Base::VERSION, ); let (pubkey, privkey) = BLSPubKey::generated_from_seed_indexed([0; 32], 1); - let vid = VidDisperseShare:: { + let vid = VidDisperseShare::V1(VidDisperseShare2:: { view_number: data_view, payload_commitment, share: disperse.shares[0].clone(), @@ -2308,7 +2301,7 @@ mod test { epoch: None, target_epoch: None, data_epoch_payload_commitment: None, - } + }) .to_proposal(&privkey) .unwrap() .clone(); @@ -2445,7 +2438,11 @@ mod test { let builder_commitment = payload.builder_commitment(&metadata); let payload_bytes = payload.encode(); - let payload_commitment = vid_commitment(&payload_bytes, 4); + let payload_commitment = vid_commitment::( + &payload_bytes, + 4, + ::Base::VERSION, + ); let block_header = Header::genesis( &instance_state, @@ -2463,7 +2460,7 @@ mod test { null_quorum_data.commit(), view, None, - PhantomData, + std::marker::PhantomData, ); let quorum_proposal = QuorumProposal { @@ -2481,7 +2478,7 @@ mod test { let proposal = Proposal { data: quorum_proposal.clone(), signature: quorum_proposal_signature, - _pd: PhantomData, + _pd: std::marker::PhantomData, }; let proposal_bytes = bincode::serialize(&proposal) @@ -2489,7 +2486,12 @@ mod test { .unwrap(); let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); - leaf.fill_block_payload(payload, 4).unwrap(); + leaf.fill_block_payload::( + payload, + 4, + ::Base::VERSION, + ) + .unwrap(); let mut tx = storage.db.write().await.unwrap(); @@ -2506,9 +2508,9 @@ mod test { .unwrap(); tx.commit().await.unwrap(); - let disperse = vid_scheme(4).disperse(payload_bytes.clone()).unwrap(); + let disperse = advz_scheme(4).disperse(payload_bytes.clone()).unwrap(); - let vid = VidDisperseShare:: { + let vid = ADVZDisperseShare:: { view_number: ViewNumber::new(i), 
payload_commitment: Default::default(), share: disperse.shares[0].clone(), @@ -2584,7 +2586,7 @@ mod test { Proposal { data: qc2, signature: v.signature, - _pd: PhantomData, + _pd: std::marker::PhantomData, } }; diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index ac3a91c65e..6d81050337 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -10,8 +10,9 @@ use hotshot::{types::EventType, HotShotInitializer}; use hotshot_types::{ consensus::CommitmentMap, data::{ - DaProposal, DaProposal2, QuorumProposal, QuorumProposal2, QuorumProposalWrapper, - VidDisperseShare, VidDisperseShare2, ViewNumber, + vid_disperse::{ADVZDisperseShare, VidDisperseShare2}, + DaProposal, DaProposal2, EpochNumber, QuorumProposal, QuorumProposal2, + QuorumProposalWrapper, VidDisperseShare, ViewNumber, }, event::{HotShotAction, LeafInfo}, message::{convert_proposal, Proposal}, @@ -461,7 +462,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn load_vid_share( &self, view: ViewNumber, - ) -> anyhow::Result>>>; + ) -> anyhow::Result>>>; async fn load_da_proposal( &self, view: ViewNumber, @@ -673,12 +674,27 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { proposal: &Proposal>, vid_commit: ::Commit, ) -> anyhow::Result<()>; - async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()>; + async fn record_action( + &self, + view: ViewNumber, + epoch: Option, + action: HotShotAction, + ) -> anyhow::Result<()>; async fn update_undecided_state2( + &self, leaves: CommitmentMap, state: BTreeMap>, ) -> anyhow::Result<()>; + async fn append_quorum_proposal2( + &self, + proposal: &Proposal>, + ) -> anyhow::Result<()>; + async fn store_upgrade_certificate( + &self, + decided_upgrade_certificate: Option>, + ) -> anyhow::Result<()>; + async fn migrate_consensus( &self, _migrate_leaf: fn(Leaf) -> Leaf2, _migrate_proposal: fn( @@ -720,7 +736,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { async fn append_vid2( &self, - proposal: &Proposal>, + proposal: &Proposal>, ) -> anyhow::Result<()>; async fn append_da2( @@ -775,7 +791,9 @@ impl Storage for Arc
         &self,
         proposal: &Proposal<SeqTypes, VidDisperseShare2<SeqTypes>>,
     ) -> anyhow::Result<()> {
-        (**self).append_vid2(proposal).await
+        (**self)
+            .append_vid2(&convert_proposal(proposal.clone()))
+            .await
     }
 
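The forwarding above relies on `convert_proposal`, which re-wraps a proposal's payload into a compatible type while keeping the rest of the proposal intact. A minimal, self-contained sketch of that wrapping pattern; the `Share2`, `VersionedShare`, and `Prop` types below are illustrative stand-ins, not the real HotShot types:

```rust
// Toy model of bridging a concrete v2 VID share into a versioned enum,
// mirroring how append_vid2 re-wraps its proposal before delegating.
#[derive(Clone, Debug)]
struct Share2 {
    view: u64,
    epoch: Option<u64>,
}

#[derive(Clone, Debug)]
enum VersionedShare {
    V1(Share2),
}

impl From<Share2> for VersionedShare {
    fn from(s: Share2) -> Self {
        VersionedShare::V1(s)
    }
}

// Stand-in for a signed proposal: only the payload type changes on conversion.
#[derive(Clone, Debug)]
struct Prop<P> {
    data: P,
    signature: Vec<u8>,
}

fn convert_prop<P1, P2: From<P1>>(p: Prop<P1>) -> Prop<P2> {
    Prop {
        data: p.data.into(),
        signature: p.signature,
    }
}

fn main() {
    let v2 = Prop {
        data: Share2 { view: 7, epoch: Some(0) },
        signature: vec![0xAB],
    };
    // Storage can persist one unified representation for every share version.
    let unified: Prop<VersionedShare> = convert_prop(v2);
    println!("{unified:?}");
}
```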
     async fn append_da(
@@ -794,8 +812,13 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P> {
         (**self).append_da2(proposal, vid_commit).await
     }
 
-    async fn record_action(&self, view: ViewNumber, action: HotShotAction) -> anyhow::Result<()> {
-        (**self).record_action(view, action).await
+    async fn record_action(
+        &self,
+        view: ViewNumber,
+        epoch: Option<EpochNumber>,
+        action: HotShotAction,
+    ) -> anyhow::Result<()> {
+        (**self).record_action(view, epoch, action).await
     }
 
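The hunk above threads an `epoch: Option<EpochNumber>` parameter through `record_action`. One plausible way to picture the change is an action log keyed on the (view, epoch) pair instead of the view alone. A small, self-contained sketch under that assumption; `ActionLog` and its types are toys, not the sequencer's persistence layer:

```rust
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;

// Illustrative stand-ins for ViewNumber / EpochNumber / HotShotAction.
type View = u64;
type Epoch = u64;

#[derive(Clone, Copy, Debug)]
enum Action {
    Vote,
    Propose,
}

#[derive(Default)]
struct ActionLog {
    // Keyed on (view, optional epoch) rather than the view alone.
    entries: BTreeMap<(View, Option<Epoch>), Action>,
}

impl ActionLog {
    // Returns false if an action was already recorded for this (view, epoch),
    // mimicking a "record once" guarantee a persistence layer might want.
    fn record(&mut self, view: View, epoch: Option<Epoch>, action: Action) -> bool {
        match self.entries.entry((view, epoch)) {
            Entry::Vacant(e) => {
                e.insert(action);
                true
            }
            Entry::Occupied(_) => false,
        }
    }
}

fn main() {
    let mut log = ActionLog::default();
    assert!(log.record(10, Some(1), Action::Vote));
    // The same view under a different epoch is a distinct key.
    assert!(log.record(10, Some(2), Action::Vote));
    // Re-recording the same (view, epoch) is rejected.
    assert!(!log.record(10, Some(1), Action::Propose));
    println!("{} actions recorded", log.entries.len());
}
```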
     async fn update_high_qc(&self, _high_qc: QuorumCertificate<SeqTypes>) -> anyhow::Result<()> {
@@ -835,9 +858,13 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P> {
     ) -> anyhow::Result<()> {
         // TODO: this is a bug in hotshot with makes with_epoch = true
         // when converting from qp2 to qp wrapper
-        let mut proposal_qp_wrapper: Proposal<SeqTypes, QuorumProposalWrapper<SeqTypes>> =
+        let proposal_qp_wrapper: Proposal<SeqTypes, QuorumProposalWrapper<SeqTypes>> =
             convert_proposal(proposal.clone());
-        proposal_qp_wrapper.data.with_epoch = false;
         (**self).append_quorum_proposal2(&proposal_qp_wrapper).await
     }

From c01b8bfbe831604a3add302cc1a9522fe8da5013 Mon Sep 17 00:00:00 2001
From: imabdulbasit 
Date: Mon, 10 Feb 2025 16:43:04 +0500
Subject: [PATCH 048/120] lint

---
 hotshot-query-service/src/data_source/update.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs
index 314567d49f..6e15a8e360 100644
--- a/hotshot-query-service/src/data_source/update.rs
+++ b/hotshot-query-service/src/data_source/update.rs
@@ -135,7 +135,7 @@ where
                     // the block payload is guaranteed to always be empty, so VID isn't really
                     // necessary. But for consistency, we will still store the VID dispersal data,
                     // computing it ourselves based on the well-known genesis VID commitment.
-                    match genesis_vid(&leaf2) {
+                    match genesis_vid(leaf2) {
                         Ok((common, share)) => (Some(common), Some(share)),
                         Err(err) => {
                             tracing::warn!("failed to compute genesis VID: {err:#}");

From ab4013d1102cb413ee12ec014327740ca67aea3f Mon Sep 17 00:00:00 2001
From: imabdulbasit 
Date: Mon, 10 Feb 2025 16:51:12 +0500
Subject: [PATCH 049/120] cargo sort

---
 node-metrics/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node-metrics/Cargo.toml b/node-metrics/Cargo.toml
index 868cfa6bab..a325e748c6 100644
--- a/node-metrics/Cargo.toml
+++ b/node-metrics/Cargo.toml
@@ -22,7 +22,7 @@ espresso-types = { path = "../types" }
 futures = { workspace = true }
 hotshot = { workspace = true }
 hotshot-example-types = { workspace = true }
-hotshot-query-service = { workspace = true}
+hotshot-query-service = { workspace = true }
 hotshot-stake-table = { workspace = true }
 tokio = { workspace = true }
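The `fix recursion` commit below removes a self-call inside the blanket `impl ... for Arc<P>`: there, `self.update_high_qc2(...)` resolves to the `Arc` impl itself rather than the wrapped persistence type, so the method recurses forever. A minimal, self-contained reproduction of the pitfall with toy types (the patch itself breaks the cycle by returning `Ok(())` instead):

```rust
use std::sync::Arc;

trait Store {
    fn save(&self) -> String;
}

struct Backend;

impl Store for Backend {
    fn save(&self) -> String {
        "saved by backend".into()
    }
}

impl<P: Store> Store for Arc<P> {
    fn save(&self) -> String {
        // BUG shape: `self.save()` would select THIS impl again, because
        // `self` is the `Arc<P>`, producing infinite recursion at runtime.
        // self.save()

        // Deref twice to dispatch to the wrapped store's impl instead.
        (**self).save()
    }
}

fn main() {
    let store = Arc::new(Backend);
    assert_eq!(store.save(), "saved by backend");
}
```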
From 3b4204732291572f2154e5e6562ef87031f2b86c Mon Sep 17 00:00:00 2001
From: imabdulbasit 
Date: Tue, 11 Feb 2025 01:22:05 +0500
Subject: [PATCH 050/120] fix recursion

---
 sequencer/src/persistence/sql.rs | 2 +-
 types/src/v0/traits.rs           | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs
index 8804c2007f..926583fa4f 100644
--- a/sequencer/src/persistence/sql.rs
+++ b/sequencer/src/persistence/sql.rs
@@ -1239,7 +1239,7 @@ impl SequencerPersistence for Persistence {
             .await?;
 
         // We also keep track of any QC we see in case we need it to recover our archival storage.
-        let justify_qc = &proposal.data.justify_qc();
+        let justify_qc = proposal.data.justify_qc();
         let justify_qc_bytes = bincode::serialize(&justify_qc).context("serializing QC")?;
         tx.upsert(
             "quorum_certificate2",
diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs
index 6d81050337..51dc634603 100644
--- a/types/src/v0/traits.rs
+++ b/types/src/v0/traits.rs
@@ -863,8 +863,8 @@ impl<P: SequencerPersistence> Storage<SeqTypes> for Arc<P> {
         (**self).append_quorum_proposal2(&proposal_qp_wrapper).await
     }
 
-    async fn update_high_qc2(&self, high_qc: QuorumCertificate2<SeqTypes>) -> anyhow::Result<()> {
-        self.update_high_qc2(high_qc).await
+    async fn update_high_qc2(&self, _high_qc: QuorumCertificate2<SeqTypes>) -> anyhow::Result<()> {
+        Ok(())
     }
 
     async fn update_decided_upgrade_certificate(

From 64eb0d848b8dacefebf6730d182e41e5fee5f113 Mon Sep 17 00:00:00 2001
From: tbro 
Date: Mon, 10 Feb 2025 17:46:29 -0300
Subject: [PATCH 051/120] Make epoch_height on TestNetwork configurable

Also adds an (incomplete) test to check that views are progressing beyond
epoch boundaries
---
 sequencer/src/api.rs | 54 +++++++++++++++++++++++++++++++++-----------
 sequencer/src/lib.rs |  5 ++++
 2 files changed, 46 insertions(+), 13 deletions(-)

diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index 063822a16f..4836247f4b 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -1557,7 +1557,10 @@ mod api_tests {
 #[cfg(test)]
 mod test {
     use committable::{Commitment, Committable};
-    use std::{collections::BTreeMap, time::Duration};
+    use std::{
+        collections::{BTreeMap, HashSet},
+        time::Duration,
+    };
     use tokio::time::sleep;
 
     use espresso_types::{
@@ -2614,12 +2617,16 @@ mod test {
         }
         assert_eq!(receive_count, total_count + 1);
     }
-    // TODO instead of as above, listen to events until we get at least to view 3
-    // maybe put in the slow category
+    // TODO unfinished test. the idea is to observe epochs and views
+    // are progressing in a sane way
    #[tokio::test(flavor = "multi_thread")]
    async fn test_hotshot_event_streaming_epoch_progression() {
        setup_test();

+        // TODO currently getting `hotshot_task_impls::helpers: : Failed epoch safety check`
+        // at epoch_height + 1
+        let epoch_height = 5;
+
        let hotshot_event_streaming_port =
            pick_unused_port().expect("No ports free for hotshot event streaming");
        let query_service_port = pick_unused_port().expect("No ports free for query service");
@@ -2638,7 +2645,10 @@ mod test {
        let anvil = Anvil::new().spawn();
        let l1 = anvil.endpoint().parse().unwrap();
-        let network_config = TestConfigBuilder::default().l1_url(l1).build();
+        let network_config = TestConfigBuilder::default()
+            .l1_url(l1)
+            .with_epoch_height(epoch_height)
+            .build();
        let config = TestNetworkConfigBuilder::default()
            .api_config(options)
            .network_config(network_config)
            .build();
@@ -2651,22 +2661,40 @@ mod test {
            .await
            .unwrap();

-        let total_count = 5;
+        // wanted views
+        let total_count = epoch_height * 2;
        // wait for these events to receive on client 1
        let mut receive_count = 0;
+        let mut views = HashSet::new();
+        let mut i = 0;
        loop {
            let event = subscribed_events.next().await.unwrap();
-            dbg!(&event);
-            tracing::info!(
-                "Received event in hotshot event streaming Client 1: {:?}",
-                event
-            );
-            receive_count += 1;
-            if receive_count > total_count {
-                tracing::info!("Client Received at least desired events, exiting loop");
+            let event = event.unwrap();
+            let view_number = event.view_number;
+            views.insert(view_number);
+            dbg!(view_number);
+
+            if let hotshot::types::EventType::Decide { ..
} = event.event { + dbg!("got decide"); + + receive_count += 1; + } + // dbg!(event.clone().unwrap().view_number); + // tracing::info!( + // "Received event in hotshot event streaming Client 1: {:?}", + // event + // ); + if views.contains(&ViewNumber::new(total_count)) { + tracing::info!("Client Received at least desired views, exiting loop"); break; } + if i > 100 { + // Timeout + panic!("Views are not progressing"); + } + i += 1; } + // TODO this is still just a place holder assert_eq!(receive_count, total_count + 1); } } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index f63a00f42a..e394d48fdf 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -741,6 +741,11 @@ pub mod testing { self } + pub fn with_epoch_height(mut self, epoch_height: u64) -> Self { + self.config.epoch_height = epoch_height; + self + } + pub fn upgrades(mut self, upgrades: BTreeMap) -> Self { let upgrade = upgrades.get(&::Upgrade::VERSION).unwrap(); upgrade.set_hotshot_config_parameters(&mut self.config); From c9eeb2af57671b9f22eca89d8d5eefa46fac830d Mon Sep 17 00:00:00 2001 From: tbro Date: Mon, 10 Feb 2025 19:08:35 -0300 Subject: [PATCH 052/120] fix --- sequencer/src/api.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 4836247f4b..4be855bf5e 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -2626,6 +2626,7 @@ mod test { // TODO currently getting `hotshot_task_impls::helpers: : Failed epoch safety check` // at epoch_height + 1 let epoch_height = 5; + type PosVersion = SequencerVersions, StaticVersion<0, 0>>; let hotshot_event_streaming_port = pick_unused_port().expect("No ports free for hotshot event streaming"); @@ -2653,7 +2654,7 @@ mod test { .api_config(options) .network_config(network_config) .build(); - let _network = TestNetwork::new(config, MockSequencerVersions::new()).await; + let _network = TestNetwork::new(config, PosVersion::new()).await; let mut subscribed_events = client .socket("hotshot-events/events") @@ -2664,27 +2665,18 @@ mod test { // wanted views let total_count = epoch_height * 2; // wait for these events to receive on client 1 - let mut receive_count = 0; let mut views = HashSet::new(); let mut i = 0; loop { let event = subscribed_events.next().await.unwrap(); let event = event.unwrap(); let view_number = event.view_number; - views.insert(view_number); - dbg!(view_number); + views.insert(view_number.u64()); if let hotshot::types::EventType::Decide { .. 
} = event.event { dbg!("got decide"); - - receive_count += 1; } - // dbg!(event.clone().unwrap().view_number); - // tracing::info!( - // "Received event in hotshot event streaming Client 1: {:?}", - // event - // ); - if views.contains(&ViewNumber::new(total_count)) { + if views.contains(&total_count) { tracing::info!("Client Received at least desired views, exiting loop"); break; } @@ -2694,7 +2686,6 @@ mod test { } i += 1; } - // TODO this is still just a place holder - assert_eq!(receive_count, total_count + 1); + assert!(views.contains(&total_count)); } } From 87b15471fcee5a752b02dd56a0e49d3cbbdcb65c Mon Sep 17 00:00:00 2001 From: tbro Date: Mon, 10 Feb 2025 19:16:57 -0300 Subject: [PATCH 053/120] remove comment --- sequencer/src/api.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 4be855bf5e..083ed2fa0f 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -2623,8 +2623,6 @@ mod test { async fn test_hotshot_event_streaming_epoch_progression() { setup_test(); - // TODO currently getting `hotshot_task_impls::helpers: : Failed epoch safety check` - // at epoch_height + 1 let epoch_height = 5; type PosVersion = SequencerVersions, StaticVersion<0, 0>>; From d8d460c84f025ea6d5bff2441d74be395dc5a926 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 11 Feb 2025 03:41:52 +0500 Subject: [PATCH 054/120] v0 and v1 availability modules --- Cargo.lock | 2 + Cargo.toml | 1 + hotshot-query-service/Cargo.toml | 1 + hotshot-query-service/src/availability.rs | 689 +++++++++++------- hotshot-query-service/src/explorer.rs | 1 + .../src/fetching/provider/any.rs | 7 +- .../src/fetching/provider/query_service.rs | 119 ++- hotshot-query-service/src/lib.rs | 15 +- sequencer-sqlite/Cargo.lock | 2 + sequencer/Cargo.toml | 1 + sequencer/src/api/endpoints.rs | 2 + sequencer/src/api/options.rs | 20 +- 12 files changed, 559 insertions(+), 301 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 655edc479c..a4c621a104 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5229,6 +5229,7 @@ dependencies = [ "refinery", "refinery-core", "reqwest 0.12.12", + "semver 1.0.24", "serde", "serde_json", "snafu 0.8.5", @@ -9877,6 +9878,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_distr", "reqwest 0.12.12", + "semver 1.0.24", "sequencer", "sequencer-utils", "serde", diff --git a/Cargo.toml b/Cargo.toml index d041a039cb..0ef8f0c76d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,6 +117,7 @@ libp2p-swarm-derive = { version = "0.35" } typenum = "1" cbor4ii = { version = "1.0", features = ["serde1"] } serde_bytes = { version = "0.11" } +semver = "1" num_cpus = "1" dashmap = "6" memoize = { version = "0.4", features = ["full"] } diff --git a/hotshot-query-service/Cargo.toml b/hotshot-query-service/Cargo.toml index da843a25bd..39048aa188 100644 --- a/hotshot-query-service/Cargo.toml +++ b/hotshot-query-service/Cargo.toml @@ -77,6 +77,7 @@ lazy_static = "1" prometheus = "0.13" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +semver = { workspace = true } snafu = "0.8" surf-disco = "0.9" tagged-base64 = "0.4" diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index 03cb36f744..e1382aefd7 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -29,7 +29,12 @@ use crate::{api::load_api, Payload, QueryError}; use derive_more::From; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use hotshot_types::traits::node_implementation::NodeType; 
+ +use hotshot_types::{ + data::{Leaf, Leaf2, QuorumProposal}, + simple_certificate::QuorumCertificate, + traits::node_implementation::NodeType, +}; use serde::{Deserialize, Serialize}; use snafu::{OptionExt, Snafu}; use std::{fmt::Display, path::PathBuf, time::Duration}; @@ -161,9 +166,97 @@ impl Error { } } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[serde(bound = "")] +pub struct Leaf1QueryData { + pub(crate) leaf: Leaf, + pub(crate) qc: QuorumCertificate, +} + +fn downgrade_leaf(leaf2: Leaf2) -> Leaf { + // TODO do we still need some check here? + // `drb_seed` no longer exists on `Leaf2` + // if leaf2.drb_seed != [0; 32] && leaf2.drb_result != [0; 32] { + // panic!("Downgrade of Leaf2 to Leaf will lose DRB information!"); + // } + let quorum_proposal = QuorumProposal { + block_header: leaf2.block_header().clone(), + view_number: leaf2.view_number(), + justify_qc: leaf2.justify_qc().to_qc(), + upgrade_certificate: leaf2.upgrade_certificate(), + proposal_certificate: None, + }; + let mut leaf = Leaf::from_quorum_proposal(&quorum_proposal); + if let Some(payload) = leaf2.block_payload() { + leaf.fill_block_payload_unchecked(payload); + } + leaf +} + +impl From> for Leaf1QueryData { + fn from(value: LeafQueryData) -> Self { + Self { + leaf: downgrade_leaf(value.leaf), + qc: value.qc.to_qc(), + } + } +} + +async fn get_leaf_handler( + req: tide_disco::RequestParams, + state: &State, + timeout: Duration, +) -> Result, Error> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + AvailabilityDataSource, + Types: NodeType, + Payload: QueryablePayload, +{ + let id = match req.opt_integer_param("height")? { + Some(height) => LeafId::Number(height), + None => LeafId::Hash(req.blob_param("hash")?), + }; + let fetch = state.read(|state| state.get_leaf(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchLeafSnafu { + resource: id.to_string(), + }) +} + +async fn get_leaf_range_handler( + req: tide_disco::RequestParams, + state: &State, + timeout: Duration, + small_object_range_limit: usize, +) -> Result>, Error> +where + State: 'static + Send + Sync + ReadState, + ::State: Send + Sync + AvailabilityDataSource, + Types: NodeType, + Payload: QueryablePayload, +{ + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, small_object_range_limit)?; + + let leaves = state + .read(|state| state.get_leaf_range(from..until).boxed()) + .await; + leaves + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchLeafSnafu { + resource: (index + from).to_string(), + }) + }) + .try_collect::>() + .await +} + pub fn define_api( options: &Options, _: Ver, + api_ver: semver::Version, ) -> Result, ApiError> where State: 'static + Send + Sync + ReadState, @@ -179,310 +272,331 @@ where let small_object_range_limit = options.small_object_range_limit; let large_object_range_limit = options.large_object_range_limit; - api.with_version("0.0.1".parse().unwrap()) - .at("get_leaf", move |req, state| { - async move { - let id = match req.opt_integer_param("height")? { - Some(height) => LeafId::Number(height), - None => LeafId::Hash(req.blob_param("hash")?), - }; - let fetch = state.read(|state| state.get_leaf(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? 
- .at("get_leaf_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, small_object_range_limit)?; - - let leaves = state - .read(|state| state.get_leaf_range(from..until).boxed()) - .await; - leaves - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchLeafSnafu { - resource: (index + from).to_string(), - }) + api.with_version(api_ver.clone()); + + // `LeafQueryData` now contains `Leaf2` and `QC2``, which is a breaking change. + // On node startup, all leaves are migrated to `Leaf2`. + // + // To maintain compatibility with nodes running an older version + // (which expect `LeafQueryData` with `Leaf1` and `QC1`), + // we downgrade `Leaf2` to `Leaf1` and `QC2` to `QC1` if the API version is V0. + // Otherwise, we return the new types. + if api_ver.major == 0 { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout) + .map(|res| res.map(Leaf1QueryData::from)) + .boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit) + .map(|res| { + res.map(|r| { + r.into_iter() + .map(Into::into) + .collect::>>() }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_leaves", move |req, state| { + }) + .boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() + async move { + Ok(state + .subscribe_leaves(height) + .await + .map(|leaf| Ok(Leaf1QueryData::from(leaf)))) + } + .boxed() }) .await } .try_flatten_stream() .boxed() - })? - .at("get_header", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_header(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: id.to_string(), - }) - } - .boxed() - })? - .at("get_header_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param::<_, usize>("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let headers = state - .read(|state| state.get_header_range(from..until).boxed()) - .await; - headers - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { - resource: (index + from).to_string(), - }) - }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_headers", move |req, state| { + })?; + } else { + api.at("get_leaf", move |req, state| { + get_leaf_handler(req, state, timeout).boxed() + })?; + + api.at("get_leaf_range", move |req, state| { + get_leaf_range_handler(req, state, timeout, small_object_range_limit).boxed() + })?; + + api.stream("stream_leaves", move |req, state| { async move { let height = req.integer_param("height")?; state .read(|state| { - async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + async move { Ok(state.subscribe_leaves(height).await.map(Ok)) }.boxed() }) .await } .try_flatten_stream() .boxed() - })? 
- .at("get_block", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), + })?; + } + api.at("get_header", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_header(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_header_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param::<_, usize>("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let headers = state + .read(|state| state.get_header_range(from..until).boxed()) + .await; + headers + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchHeaderSnafu { + resource: (index + from).to_string(), + }) }) - } - .boxed() - })? - .at("get_block_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_headers", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_headers(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_block", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_block_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .try_collect::>() - .await - } - .boxed() - })? 
- .stream("stream_blocks", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_blocks", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_blocks(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_payload", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::PayloadHash(hash) + } else { + BlockId::Hash(req.blob_param("block-hash")?) + }; + let fetch = state.read(|state| state.get_payload(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? + .at("get_payload_range", move |req, state| { + async move { + let from = req.integer_param::<_, usize>("from")?; + let until = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let payloads = state + .read(|state| state.get_payload_range(from..until).boxed()) + .await; + payloads + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_payload", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::PayloadHash(hash) - } else { - BlockId::Hash(req.blob_param("block-hash")?) - }; - let fetch = state.read(|state| state.get_payload(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: id.to_string(), }) - } - .boxed() - })? - .at("get_payload_range", move |req, state| { - async move { - let from = req.integer_param::<_, usize>("from")?; - let until = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let payloads = state - .read(|state| state.get_payload_range(from..until).boxed()) - .await; - payloads - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), + .try_collect::>() + .await + } + .boxed() + })? + .stream("stream_payloads", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_vid_common", move |req, state| { + async move { + let id = if let Some(height) = req.opt_integer_param("height")? { + BlockId::Number(height) + } else if let Some(hash) = req.opt_blob_param("hash")? { + BlockId::Hash(hash) + } else { + BlockId::PayloadHash(req.blob_param("payload-hash")?) + }; + let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: id.to_string(), + }) + } + .boxed() + })? 
+ .stream("stream_vid_common", move |req, state| { + async move { + let height = req.integer_param("height")?; + state + .read(|state| { + async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() + }) + .await + } + .try_flatten_stream() + .boxed() + })? + .at("get_transaction", move |req, state| { + async move { + match req.opt_blob_param("hash")? { + Some(hash) => { + let fetch = state + .read(|state| state.get_transaction(hash).boxed()) + .await; + fetch + .with_timeout(timeout) + .await + .context(FetchTransactionSnafu { + resource: hash.to_string(), }) - }) - .try_collect::>() - .await - } - .boxed() - })? - .stream("stream_payloads", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_payloads(height).await.map(Ok)) }.boxed() - }) - .await + } + None => { + let height: u64 = req.integer_param("height")?; + let fetch = state + .read(|state| state.get_block(height as usize).boxed()) + .await; + let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: height.to_string(), + })?; + let i: u64 = req.integer_param("index")?; + let index = block + .payload() + .nth(block.metadata(), i as usize) + .context(InvalidTransactionIndexSnafu { height, index: i })?; + TransactionQueryData::new(&block, index, i) + .context(InvalidTransactionIndexSnafu { height, index: i }) + } } - .try_flatten_stream() - .boxed() - })? - .at("get_vid_common", move |req, state| { - async move { - let id = if let Some(height) = req.opt_integer_param("height")? { - BlockId::Number(height) - } else if let Some(hash) = req.opt_blob_param("hash")? { - BlockId::Hash(hash) - } else { - BlockId::PayloadHash(req.blob_param("payload-hash")?) - }; - let fetch = state.read(|state| state.get_vid_common(id).boxed()).await; - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + } + .boxed() + })? + .at("get_block_summary", move |req, state| { + async move { + let id: usize = req.integer_param("height")?; + + let fetch = state.read(|state| state.get_block(id).boxed()).await; + fetch + .with_timeout(timeout) + .await + .context(FetchBlockSnafu { resource: id.to_string(), }) - } - .boxed() - })? - .stream("stream_vid_common", move |req, state| { - async move { - let height = req.integer_param("height")?; - state - .read(|state| { - async move { Ok(state.subscribe_vid_common(height).await.map(Ok)) }.boxed() - }) - .await - } - .try_flatten_stream() - .boxed() - })? - .at("get_transaction", move |req, state| { - async move { - match req.opt_blob_param("hash")? { - Some(hash) => { - let fetch = state - .read(|state| state.get_transaction(hash).boxed()) - .await; - fetch - .with_timeout(timeout) - .await - .context(FetchTransactionSnafu { - resource: hash.to_string(), - }) - } - None => { - let height: u64 = req.integer_param("height")?; - let fetch = state - .read(|state| state.get_block(height as usize).boxed()) - .await; - let block = fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: height.to_string(), - })?; - let i: u64 = req.integer_param("index")?; - let index = block - .payload() - .nth(block.metadata(), i as usize) - .context(InvalidTransactionIndexSnafu { height, index: i })?; - TransactionQueryData::new(&block, index, i) - .context(InvalidTransactionIndexSnafu { height, index: i }) - } - } - } - .boxed() - })? 
- .at("get_block_summary", move |req, state| { - async move { - let id: usize = req.integer_param("height")?; - - let fetch = state.read(|state| state.get_block(id).boxed()).await; - fetch - .with_timeout(timeout) - .await - .context(FetchBlockSnafu { - resource: id.to_string(), - }) - .map(BlockSummaryQueryData::from) - } - .boxed() - })? - .at("get_block_summary_range", move |req, state| { - async move { - let from: usize = req.integer_param("from")?; - let until: usize = req.integer_param("until")?; - enforce_range_limit(from, until, large_object_range_limit)?; - - let blocks = state - .read(|state| state.get_block_range(from..until).boxed()) - .await; - let result: Vec> = blocks - .enumerate() - .then(|(index, fetch)| async move { - fetch.with_timeout(timeout).await.context(FetchBlockSnafu { - resource: (index + from).to_string(), - }) + .map(BlockSummaryQueryData::from) + } + .boxed() + })? + .at("get_block_summary_range", move |req, state| { + async move { + let from: usize = req.integer_param("from")?; + let until: usize = req.integer_param("until")?; + enforce_range_limit(from, until, large_object_range_limit)?; + + let blocks = state + .read(|state| state.get_block_range(from..until).boxed()) + .await; + let result: Vec> = blocks + .enumerate() + .then(|(index, fetch)| async move { + fetch.with_timeout(timeout).await.context(FetchBlockSnafu { + resource: (index + from).to_string(), }) - .map(|result| result.map(BlockSummaryQueryData::from)) - .try_collect() - .await?; - - Ok(result) - } - .boxed() - })? - .at("get_limits", move |_req, _state| { - async move { - Ok(Limits { - small_object_range_limit, - large_object_range_limit, }) - } - .boxed() - })?; + .map(|result| result.map(BlockSummaryQueryData::from)) + .try_collect() + .await?; + + Ok(result) + } + .boxed() + })? 
+ .at("get_limits", move |_req, _state| { + async move { + Ok(Limits { + small_object_range_limit, + large_object_range_limit, + }) + } + .boxed() + })?; Ok(api) } @@ -790,7 +904,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -926,6 +1045,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "0.0.1".parse().unwrap(), ) .unwrap(); api.get("get_ext", |_, state| { @@ -996,6 +1116,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "0.0.1".parse().unwrap(), ) .unwrap(), ) @@ -1080,7 +1201,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1117,7 +1243,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index 03ca26323c..ef259a83e5 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -878,6 +878,7 @@ mod test { ..Default::default() }, MockBase::instance(), + "0.0.1".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index f2f4c3536a..37d5b854db 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -234,7 +234,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); let _server = BackgroundTask::spawn( diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index 0fad9606c8..7e752c8222 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -249,7 +249,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -472,7 +477,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -530,7 +540,12 @@ mod test { let mut app = App::<_, 
Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -592,7 +607,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -651,7 +671,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -707,7 +732,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -778,7 +808,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -923,7 +958,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1091,7 +1131,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1190,7 +1235,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1282,7 +1332,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1346,7 +1401,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1404,7 +1464,12 @@ mod test { let mut app = App::<_, 
Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1480,7 +1545,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1570,7 +1640,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1637,7 +1712,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( @@ -1709,7 +1789,12 @@ mod test { let mut app = App::<_, Error>::with_state(ApiState::from(network.data_source())); app.register_module( "availability", - define_api(&Default::default(), MockBase::instance()).unwrap(), + define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap(); network.spawn( diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index b215d8cad8..8c0be66106 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -541,8 +541,12 @@ where ApiVer: StaticVersionType + 'static, { // Create API modules. 
- let availability_api = - availability::define_api(&options.availability, bind_version).map_err(Error::internal)?; + let availability_api = availability::define_api( + &options.availability, + bind_version, + "0.0.1".parse().unwrap(), + ) + .map_err(Error::internal)?; let node_api = node::define_api(&options.node, bind_version).map_err(Error::internal)?; let status_api = status::define_api(&options.status, bind_version).map_err(Error::internal)?; @@ -860,7 +864,12 @@ mod test { let mut app = App::<_, Error>::with_state(RwLock::new(state)); app.register_module( "availability", - availability::define_api(&Default::default(), MockBase::instance()).unwrap(), + availability::define_api( + &Default::default(), + MockBase::instance(), + "0.0.1".parse().unwrap(), + ) + .unwrap(), ) .unwrap() .register_module( diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index acfa39f3b5..1a688e4ec9 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4896,6 +4896,7 @@ dependencies = [ "prometheus", "refinery", "refinery-core", + "semver 1.0.24", "serde", "serde_json", "snafu 0.8.5", @@ -9262,6 +9263,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", + "semver 1.0.24", "sequencer-utils", "serde", "serde_json", diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 69564f90d5..777e8e785e 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -96,6 +96,7 @@ priority-queue = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rand_distr = { workspace = true } +semver = { workspace = true } sequencer-utils = { path = "../utils" } serde = { workspace = true } serde_json = { workspace = true } diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index c33df0196a..ed661026e8 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -90,6 +90,7 @@ type AvailabilityApi = Api, availabil // Snafu has been replaced by `this_error` everywhere. // However, the query service still uses snafu pub(super) fn availability( + api_ver: semver::Version, ) -> Result> where N: ConnectedNetwork, @@ -104,6 +105,7 @@ where let mut api = availability::define_api::, SeqTypes, _>( &options, SequencerApiVersion::instance(), + api_ver, )?; api.get("getnamespaceproof", move |req, state| { diff --git a/sequencer/src/api/options.rs b/sequencer/src/api/options.rs index f2fa44552c..c1de9236f2 100644 --- a/sequencer/src/api/options.rs +++ b/sequencer/src/api/options.rs @@ -200,7 +200,25 @@ impl Options { app.register_module("status", status_api)?; // Initialize availability and node APIs (these both use the same data source). - app.register_module("availability", endpoints::availability()?)?; + + // Note: We initialize two versions of the availability module: `availability/v0` and `availability/v1`. + // - `availability/v0/leaf/0` returns the old `Leaf1` type for backward compatibility. + // - `availability/v1/leaf/0` returns the new `Leaf2` type + + // initialize the availability module for API version V0. + // This ensures compatibility for nodes that expect `Leaf1` for leaf endpoints + app.register_module( + "availability", + endpoints::availability("0.0.1".parse().unwrap())?, + )?; + + // initialize the availability module for API version V1. 
+ // This enables support for the new `Leaf2` type + app.register_module( + "availability", + endpoints::availability("1.0.0".parse().unwrap())?, + )?; + app.register_module("node", endpoints::node()?)?; // Initialize submit API From 8fe27d134083d395eeb36fda1f0b07b526198076 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 11 Feb 2025 03:47:19 +0500 Subject: [PATCH 055/120] cargo sort --- hotshot-query-service/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hotshot-query-service/Cargo.toml b/hotshot-query-service/Cargo.toml index 39048aa188..c720cdc450 100644 --- a/hotshot-query-service/Cargo.toml +++ b/hotshot-query-service/Cargo.toml @@ -75,9 +75,9 @@ jf-vid = { version = "0.1.0", git = "https://github.com/EspressoSystems/jellyfis ] } lazy_static = "1" prometheus = "0.13" +semver = { workspace = true } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -semver = { workspace = true } snafu = "0.8" surf-disco = "0.9" tagged-base64 = "0.4" From 04dbac0f377b98ca3c5deeaeb72e6b9f3135bd91 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 11 Feb 2025 04:01:05 +0500 Subject: [PATCH 056/120] fix tests --- hotshot-query-service/src/availability.rs | 10 +++--- .../src/fetching/provider/any.rs | 2 +- .../src/fetching/provider/query_service.rs | 34 +++++++++---------- hotshot-query-service/src/lib.rs | 15 ++++++-- 4 files changed, 35 insertions(+), 26 deletions(-) diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index e1382aefd7..97797f5df2 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -907,7 +907,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1045,7 +1045,7 @@ mod test { ..Default::default() }, MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(); api.get("get_ext", |_, state| { @@ -1116,7 +1116,7 @@ mod test { ..Default::default() }, MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1204,7 +1204,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1246,7 +1246,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index 37d5b854db..0167941150 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -237,7 +237,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index 7e752c8222..3046c9578b 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -252,7 +252,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -480,7 +480,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -543,7 +543,7 @@ mod test { 
define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -610,7 +610,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -674,7 +674,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -735,7 +735,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -811,7 +811,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -961,7 +961,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1134,7 +1134,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1238,7 +1238,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1335,7 +1335,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1404,7 +1404,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1467,7 +1467,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1548,7 +1548,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1643,7 +1643,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1715,7 +1715,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) @@ -1792,7 +1792,7 @@ mod test { define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) diff --git a/hotshot-query-service/src/lib.rs b/hotshot-query-service/src/lib.rs index 8c0be66106..4d158156fa 100644 --- a/hotshot-query-service/src/lib.rs +++ b/hotshot-query-service/src/lib.rs @@ -541,19 +541,28 @@ where ApiVer: StaticVersionType + 'static, { // Create API modules. - let availability_api = availability::define_api( + let availability_api_v0 = availability::define_api( &options.availability, bind_version, "0.0.1".parse().unwrap(), ) .map_err(Error::internal)?; + + let availability_api_v1 = availability::define_api( + &options.availability, + bind_version, + "1.0.0".parse().unwrap(), + ) + .map_err(Error::internal)?; let node_api = node::define_api(&options.node, bind_version).map_err(Error::internal)?; let status_api = status::define_api(&options.status, bind_version).map_err(Error::internal)?; // Create app. let data_source = Arc::new(data_source); let mut app = App::<_, Error>::with_state(ApiState(data_source.clone())); - app.register_module("availability", availability_api) + app.register_module("availability", availability_api_v0) + .map_err(Error::internal)? + .register_module("availability", availability_api_v1) .map_err(Error::internal)? 
.register_module("node", node_api) .map_err(Error::internal)? @@ -867,7 +876,7 @@ mod test { availability::define_api( &Default::default(), MockBase::instance(), - "0.0.1".parse().unwrap(), + "1.0.0".parse().unwrap(), ) .unwrap(), ) From 86fd4a90cec70668129d2f7ab90c79d0f38528e6 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 11 Feb 2025 04:12:24 +0500 Subject: [PATCH 057/120] lock file --- Cargo.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1772d8d2d5..f112b09191 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1806,7 +1806,7 @@ dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.11.0", + "itertools 0.10.5", "log", "prettyplease", "proc-macro2", @@ -2483,9 +2483,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -5591,7 +5591,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.16", - "socket2 0.4.10", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -6593,7 +6593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -8663,7 +8663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.11.0", + "itertools 0.12.1", "proc-macro2", "quote", "syn 2.0.95", @@ -10326,7 +10326,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03c3c6b7927ffe7ecaa769ee0e3994da3b8cafc8f444578982c83ecb161af917" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.95", From 1d47a0806563ba9e371caf41716c9fd0258acfc0 Mon Sep 17 00:00:00 2001 From: tbro Date: Tue, 11 Feb 2025 11:58:24 -0300 Subject: [PATCH 058/120] add test profile --- Cargo.toml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 8006e9045d..94308888ad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -224,3 +224,11 @@ paste = "1.0" rand = "0.8.5" time = "0.3" trait-set = "0.3.0" +[profile.test] +opt-level = 1 +[profile.test.package.tests] +opt-level = 0 +[profile.test.package.client] +opt-level = 0 +[profile.test.package.hotshot-state-prover] +opt-level = 3 From 9936c1ee5be92145c3edf38754478a7bf3d9a7de Mon Sep 17 00:00:00 2001 From: tbro Date: Wed, 12 Feb 2025 16:39:52 -0300 Subject: [PATCH 059/120] Add pos view based upgrade test Currently does not pass. We get votes but after voting there is not progress. Removed fee upgrade test, b/c there is no longer a version earlier than fee. 
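As an aside for reviewers (not part of the patch itself), the upgrade wiring this test exercises looks roughly like the sketch below. It assumes the usual `espresso_types` exports; the `SequencerVersions<FeeVersion, EpochVersion>` instantiation is inferred from the run.rs feature mapping, since generic parameters were lost in this excerpt.

    // Sketch only: mirrors the test setup in sequencer/src/api.rs below.
    use std::collections::BTreeMap;

    use espresso_types::{
        ChainConfig, EpochVersion, FeeVersion, SequencerVersions, Upgrade, UpgradeMode,
        UpgradeType, ViewBasedUpgrade,
    };
    use hotshot_types::traits::node_implementation::Versions;
    use vbs::version::{StaticVersionType, Version};

    type MySequencerVersions = SequencerVersions<FeeVersion, EpochVersion>;

    fn pos_upgrade_map() -> BTreeMap<Version, Upgrade> {
        let mut upgrades = BTreeMap::new();
        // Propose the upgrade between views 1 and 10; the voting window is
        // left unbounded so nodes vote as soon as they see the proposal.
        let mode = UpgradeMode::View(ViewBasedUpgrade {
            start_voting_view: None,
            stop_voting_view: None,
            start_proposing_view: 1,
            stop_proposing_view: 10,
        });
        // The epoch (PoS) upgrade carries a new chain config, including the
        // stake table contract address that consensus will read stake from.
        let upgrade_type = UpgradeType::Epoch {
            chain_config: ChainConfig {
                max_block_size: 500.into(),
                base_fee: 2.into(),
                stake_table_contract: Some(Default::default()),
                ..Default::default()
            },
        };
        // The map is keyed by the version being upgraded to.
        upgrades.insert(
            <MySequencerVersions as Versions>::Upgrade::VERSION,
            Upgrade { mode, upgrade_type },
        );
        upgrades
    }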
--- sequencer/src/api.rs | 76 +++++--------------------------------------- sequencer/src/run.rs | 1 + 2 files changed, 9 insertions(+), 68 deletions(-) diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 761b253d2e..ec9cca931d 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -2130,11 +2130,11 @@ mod test { } #[tokio::test(flavor = "multi_thread")] - async fn test_fee_upgrade_view_based() { + async fn test_pos_upgrade_view_based() { setup_test(); let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions, StaticVersion<0, 2>>; + type MySequencerVersions = SequencerVersions; let mode = UpgradeMode::View(ViewBasedUpgrade { start_voting_view: None, @@ -2143,41 +2143,11 @@ mod test { stop_proposing_view: 10, }); - let upgrade_type = UpgradeType::Fee { - chain_config: ChainConfig { - max_block_size: 300.into(), - base_fee: 1.into(), - ..Default::default() - }, - }; - - upgrades.insert( - ::Upgrade::VERSION, - Upgrade { mode, upgrade_type }, - ); - test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_fee_upgrade_time_based() { - setup_test(); - - let now = OffsetDateTime::now_utc().unix_timestamp() as u64; - - let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions, StaticVersion<0, 2>>; - - let mode = UpgradeMode::Time(TimeBasedUpgrade { - start_proposing_time: Timestamp::from_integer(now).unwrap(), - stop_proposing_time: Timestamp::from_integer(now + 500).unwrap(), - start_voting_time: None, - stop_voting_time: None, - }); - - let upgrade_type = UpgradeType::Fee { + let upgrade_type = UpgradeType::Epoch { chain_config: ChainConfig { - max_block_size: 300.into(), - base_fee: 1.into(), + max_block_size: 500.into(), + base_fee: 2.into(), + stake_table_contract: Some(Default::default()), ..Default::default() }, }; @@ -2194,7 +2164,7 @@ mod test { setup_test(); let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; + type MySequencerVersions = SequencerVersions; let mode = UpgradeMode::View(ViewBasedUpgrade { start_voting_view: None, @@ -2226,7 +2196,7 @@ mod test { let now = OffsetDateTime::now_utc().unix_timestamp() as u64; let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; + type MySequencerVersions = SequencerVersions; let mode = UpgradeMode::Time(TimeBasedUpgrade { start_proposing_time: Timestamp::from_integer(now).unwrap(), @@ -2251,36 +2221,6 @@ mod test { test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; } - #[tokio::test(flavor = "multi_thread")] - async fn test_pos_upgrade_view_based() { - setup_test(); - - let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; - - let mode = UpgradeMode::View(ViewBasedUpgrade { - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 1, - stop_proposing_view: 10, - }); - - let upgrade_type = UpgradeType::Marketplace { - chain_config: ChainConfig { - max_block_size: 400.into(), - base_fee: 2.into(), - bid_recipient: Some(Default::default()), - ..Default::default() - }, - }; - - upgrades.insert( - ::Upgrade::VERSION, - Upgrade { mode, upgrade_type }, - ); - test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; - } - async fn test_upgrade_helper( upgrades: BTreeMap, bind_version: MockSeqVersions, diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 
d6efcf2314..6efe90c484 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -59,6 +59,7 @@ pub async fn main() -> anyhow::Result<()> { ) .await } + // TODO change `fee` to `pos` #[cfg(all(feature = "fee", feature = "marketplace"))] (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { run( From 0d91dc86c5bcaf9d494e7aa4aaad019758df1413 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 13 Feb 2025 15:12:37 -0300 Subject: [PATCH 060/120] Fix chain_config upgrade in Header::new --- types/src/v0/impls/header.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 3db2309edb..427dd2bf21 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -978,6 +978,7 @@ impl BlockHeader for Header { match instance_state.upgrades.get(&version) { Some(upgrade) => match upgrade.upgrade_type { UpgradeType::Fee { chain_config } => chain_config, + UpgradeType::Epoch { chain_config } => chain_config, _ => Header::get_chain_config(&validated_state, instance_state).await?, }, None => Header::get_chain_config(&validated_state, instance_state).await?, From 0965d904a0437a5393bc4ea00d4a196b78d9e595 Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 13 Feb 2025 15:13:06 -0300 Subject: [PATCH 061/120] remove a TODO (hurray!) --- sequencer/src/lib.rs | 1 - types/src/v0/impls/instance_state.rs | 7 ------- 2 files changed, 8 deletions(-) diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index e6eab1ca28..1d13e38029 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -979,7 +979,6 @@ pub mod testing { ) .with_current_version(V::Base::version()) .with_genesis(state) - .with_epoch_height(config.epoch_height) .with_upgrades(upgrades); // Create the HotShot membership diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 545b3722ce..99d4e4d10b 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -135,13 +135,6 @@ impl NodeState { self.current_version = ver; self } - - // TODO remove following `Memberships` trait update: - // https://github.com/EspressoSystems/HotShot/issues/3966 - pub fn with_epoch_height(mut self, epoch_height: u64) -> Self { - self.epoch_height = Some(epoch_height); - self - } } // This allows us to turn on `Default` on InstanceState trait From a980b6c2d27d11737378e9ec0dec9aca8d58691e Mon Sep 17 00:00:00 2001 From: tbro Date: Thu, 13 Feb 2025 15:13:24 -0300 Subject: [PATCH 062/120] add a mock_v3 to `InstanceState` --- types/src/v0/impls/instance_state.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 99d4e4d10b..115c8050aa 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -97,6 +97,20 @@ impl NodeState { ) } + #[cfg(any(test, feature = "testing"))] + pub fn mock_v3() -> Self { + use vbs::version::StaticVersion; + + Self::new( + 0, + ChainConfig::default(), + L1Client::new(vec!["http://localhost:3331".parse().unwrap()]) + .expect("Failed to create L1 client"), + mock::MockStateCatchup::default(), + StaticVersion::<0, 3>::version(), + ) + } + #[cfg(any(test, feature = "testing"))] pub fn mock_v99() -> Self { use vbs::version::StaticVersion; From dfb0a88aa681f18f37d3787292374a95b02a8dd6 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 14 Feb 2025 11:04:32 -0300 Subject: [PATCH 063/120] log error on ChainConfig upgrade in Header::new --- types/src/v0/impls/header.rs | 5 ++++- 1 
file changed, 4 insertions(+), 1 deletion(-) diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 427dd2bf21..e0164cf653 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -979,7 +979,10 @@ impl BlockHeader for Header { Some(upgrade) => match upgrade.upgrade_type { UpgradeType::Fee { chain_config } => chain_config, UpgradeType::Epoch { chain_config } => chain_config, - _ => Header::get_chain_config(&validated_state, instance_state).await?, + _ => { + tracing::error!("Header::new() ChainConfig Upgrade: Unknown UpgradeType"); + Header::get_chain_config(&validated_state, instance_state).await?; + } }, None => Header::get_chain_config(&validated_state, instance_state).await?, }
From 88b8280f245346078e4399b1b2d2666fde4a92ef Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 14 Feb 2025 11:10:43 -0300 Subject: [PATCH 064/120] fix --- types/src/v0/impls/header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index e0164cf653..c1a92f46a2 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -981,7 +981,7 @@ impl BlockHeader for Header { UpgradeType::Epoch { chain_config } => chain_config, _ => { tracing::error!("Header::new() ChainConfig Upgrade: Unknown UpgradeType"); - Header::get_chain_config(&validated_state, instance_state).await?; + Header::get_chain_config(&validated_state, instance_state).await? } }, None => Header::get_chain_config(&validated_state, instance_state).await?,
From f61e7994befb955b5511838dffff23f3cf6c91ac Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 14 Feb 2025 12:47:58 -0300 Subject: [PATCH 065/120] Update sqlx (#2617) Updating sqlx to `0.8.3` appears to resolve excessive memory usage when compiling the bundled sqlite. It was originally planned to add `sqlite-unbundled` as well to use the native/system sqlite, but apparently the excessive memory usage disappears without that change. And it may be disruptive, as our deployments currently rely on the bundled sqlite. As a follow-up, we can consider adding our own `sqlite-unbundled` feature to easily avoid compiling sqlite where we know there is one available in testing.
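A sketch of what that follow-up could look like, purely hypothetical and not part of this commit: forward sqlx's own `sqlite-unbundled` feature behind a workspace opt-in, so test builds can link a system libsqlite3 while deployments keep the vendored copy. Cargo features are additive, so the opt-in only takes effect when the bundled feature is left off.

    # Hypothetical follow-up sketch; not part of this commit.
    [features]
    # Current behavior: compile sqlx's vendored SQLite.
    default = ["sqlite-bundled"]
    sqlite-bundled = ["sqlx/sqlite"]
    # Test-only opt-in, e.g.:
    #   cargo test --no-default-features --features sqlite-unbundled
    sqlite-unbundled = ["sqlx/sqlite-unbundled"]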
--------- Co-authored-by: tbro --- Cargo.lock | 71 +++++++++++++------------------------ Cargo.toml | 2 +- sequencer-sqlite/Cargo.lock | 67 ++++++++++++---------------------- 3 files changed, 47 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5566a180f4..6722fe2698 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1806,7 +1806,7 @@ dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.10.5", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", @@ -4649,11 +4649,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -6593,7 +6593,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -10400,21 +10400,11 @@ dependencies = [ "der", ] -[[package]] -name = "sqlformat" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" -dependencies = [ - "nom", - "unicode_categories", -] - [[package]] name = "sqlx" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" +checksum = "4410e73b3c0d8442c5f99b425d7a435b5ee0ae4167b3196771dd3f7a01be745f" dependencies = [ "sqlx-core", "sqlx-macros", @@ -10425,39 +10415,33 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" +checksum = "6a007b6936676aa9ab40207cde35daab0a04b823be8ae004368c0793b96a61e0" dependencies = [ - "atoi", "bit-vec 0.6.3", - "byteorder", "bytes 1.9.0", "crc", "crossbeam-queue", "either", "event-listener 5.4.0", - "futures-channel", "futures-core", "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.14.5", - "hashlink 0.9.1", - "hex", + "hashbrown 0.15.2", + "hashlink 0.10.0", "indexmap 2.7.0", "log", "memchr", "native-tls", "once_cell", - "paste", "percent-encoding", "serde", "serde_json", "sha2 0.10.8", "smallvec", - "sqlformat", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tokio", "tokio-stream", @@ -10467,9 +10451,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" +checksum = "3112e2ad78643fef903618d78cf0aec1cb3134b019730edb039b69eaf531f310" dependencies = [ "proc-macro2", "quote", @@ -10480,9 +10464,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" +checksum = "4e9f90acc5ab146a99bf5061a7eb4976b573f560bc898ef3bf8435448dd5e7ad" dependencies = [ "dotenvy", "either", @@ -10506,9 +10490,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.2" +version = 
"0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" +checksum = "4560278f0e00ce64938540546f59f590d60beee33fffbd3b9cd47851e5fff233" dependencies = [ "atoi", "base64 0.22.1", @@ -10541,7 +10525,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tracing", "whoami", @@ -10549,9 +10533,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" +checksum = "c5b98a57f363ed6764d5b3a12bfedf62f07aa16e1856a7ddc2a0bb190a959613" dependencies = [ "atoi", "base64 0.22.1", @@ -10563,7 +10547,6 @@ dependencies = [ "etcetera", "futures-channel", "futures-core", - "futures-io", "futures-util", "hex", "hkdf 0.12.4", @@ -10581,7 +10564,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tracing", "whoami", @@ -10589,9 +10572,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" +checksum = "f85ca71d3a5b24e64e1d08dd8fe36c6c95c339a896cc33068148906784620540" dependencies = [ "atoi", "flume 0.11.1", @@ -11956,12 +11939,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - [[package]] name = "universal-hash" version = "0.4.0" diff --git a/Cargo.toml b/Cargo.toml index 94308888ad..7c7d9278b3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,7 +194,7 @@ log-panics = { version = "2.0", features = ["with-backtrace"] } lru = "0.12" strum = { version = "0.26", features = ["derive"] } surf-disco = "0.9" -sqlx = "=0.8.2" +sqlx = "=0.8.3" tagged-base64 = "0.4" tide-disco = "0.9.3" thiserror = "1.0.69" diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index a2d52a31c8..262dcb4025 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4437,11 +4437,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -9793,21 +9793,11 @@ dependencies = [ "der", ] -[[package]] -name = "sqlformat" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" -dependencies = [ - "nom", - "unicode_categories", -] - [[package]] name = "sqlx" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" +checksum = "4410e73b3c0d8442c5f99b425d7a435b5ee0ae4167b3196771dd3f7a01be745f" dependencies = [ "sqlx-core", "sqlx-macros", @@ -9818,39 +9808,33 @@ 
dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" +checksum = "6a007b6936676aa9ab40207cde35daab0a04b823be8ae004368c0793b96a61e0" dependencies = [ - "atoi", "bit-vec", - "byteorder", "bytes 1.9.0", "crc", "crossbeam-queue", "either", "event-listener 5.4.0", - "futures-channel", "futures-core", "futures-intrusive", "futures-io", "futures-util", - "hashbrown 0.14.5", - "hashlink 0.9.1", - "hex", + "hashbrown 0.15.2", + "hashlink 0.10.0", "indexmap 2.7.0", "log", "memchr", "native-tls", "once_cell", - "paste", "percent-encoding", "serde", "serde_json", "sha2 0.10.8", "smallvec", - "sqlformat", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tokio", "tokio-stream", @@ -9860,9 +9844,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" +checksum = "3112e2ad78643fef903618d78cf0aec1cb3134b019730edb039b69eaf531f310" dependencies = [ "proc-macro2", "quote", @@ -9873,9 +9857,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" +checksum = "4e9f90acc5ab146a99bf5061a7eb4976b573f560bc898ef3bf8435448dd5e7ad" dependencies = [ "dotenvy", "either", @@ -9899,9 +9883,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" +checksum = "4560278f0e00ce64938540546f59f590d60beee33fffbd3b9cd47851e5fff233" dependencies = [ "atoi", "base64 0.22.1", @@ -9934,7 +9918,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tracing", "whoami", @@ -9942,9 +9926,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" +checksum = "c5b98a57f363ed6764d5b3a12bfedf62f07aa16e1856a7ddc2a0bb190a959613" dependencies = [ "atoi", "base64 0.22.1", @@ -9956,7 +9940,6 @@ dependencies = [ "etcetera", "futures-channel", "futures-core", - "futures-io", "futures-util", "hex", "hkdf 0.12.4", @@ -9974,7 +9957,7 @@ dependencies = [ "smallvec", "sqlx-core", "stringprep", - "thiserror 1.0.69", + "thiserror 2.0.10", "time 0.3.37", "tracing", "whoami", @@ -9982,9 +9965,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" +checksum = "f85ca71d3a5b24e64e1d08dd8fe36c6c95c339a896cc33068148906784620540" dependencies = [ "atoi", "flume 0.11.1", @@ -11275,12 +11258,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" -[[package]] -name = "unicode_categories" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" - [[package]] name = "universal-hash" version = "0.4.0" From a94027ad9a4fe47a507a7d9dbd3a7d0b50fed798 Mon Sep 17 00:00:00 2001 From: tbro Date: Fri, 14 Feb 2025 14:14:37 -0300 Subject: [PATCH 066/120] Cleanup chain_config upgrade in `Header::new_legacy` And add some tests --- types/src/v0/impls/header.rs | 14 +--- types/src/v0/impls/instance_state.rs | 96 +++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 16 deletions(-) diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index c1a92f46a2..bb7b955ae2 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -974,19 +974,7 @@ impl BlockHeader for Header { let mut validated_state = parent_state.clone(); - let chain_config = if version > instance_state.current_version { - match instance_state.upgrades.get(&version) { - Some(upgrade) => match upgrade.upgrade_type { - UpgradeType::Fee { chain_config } => chain_config, - UpgradeType::Epoch { chain_config } => chain_config, - _ => { - tracing::error!("Header::new() ChainConfig Upgrade: Unknown UpgradeType"); - Header::get_chain_config(&validated_state, instance_state).await? - } - }, - None => Header::get_chain_config(&validated_state, instance_state).await?, - } - } else { + let chain_config = instance_state.upgrade_chain_config(version) else { Header::get_chain_config(&validated_state, instance_state).await? }; diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 115c8050aa..b7f97484ec 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -9,7 +9,7 @@ use vbs::version::Version; #[cfg(any(test, feature = "testing"))] use vbs::version::{StaticVersion, StaticVersionType}; -use super::state::ValidatedState; +use super::{state::ValidatedState, UpgradeType}; /// Represents the immutable state of a node. /// @@ -145,10 +145,24 @@ impl NodeState { self } - pub fn with_current_version(mut self, ver: Version) -> Self { - self.current_version = ver; + pub fn with_current_version(mut self, version: Version) -> Self { + self.current_version = version; self } + + /// Given a `version`, get the correct `ChainConfig` from `self.upgrades`. + pub fn upgrade_chain_config(&self, version: Version) -> Option { + let chain_config = (version > self.current_version).then(|| { + self.upgrades + .get(&version) + .and_then(|upgrade| match upgrade.upgrade_type { + UpgradeType::Fee { chain_config } => Some(chain_config), + UpgradeType::Epoch { chain_config } => Some(chain_config), + _ => None, + }) + }); + chain_config? 
+ } } // This allows us to turn on `Default` on InstanceState trait @@ -289,3 +303,79 @@ pub mod mock { } } } + +#[cfg(test)] +mod test { + + use crate::v0::Versions; + use crate::{EpochVersion, FeeVersion, SequencerVersions, ViewBasedUpgrade}; + + use super::*; + + #[test] + fn test_upgrade_chain_config_version_02() { + let mut upgrades = std::collections::BTreeMap::new(); + type MySequencerVersions = SequencerVersions, FeeVersion>; + + let mode = UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }); + + let upgraded_chain_config = ChainConfig { + max_block_size: 300.into(), + base_fee: 1.into(), + ..Default::default() + }; + + let upgrade_type = UpgradeType::Fee { + chain_config: upgraded_chain_config, + }; + + upgrades.insert( + ::Upgrade::VERSION, + Upgrade { mode, upgrade_type }, + ); + + let instance_state = NodeState::mock().with_upgrades(upgrades); + + let chain_config = instance_state.upgrade_chain_config(FeeVersion::version()); + assert_eq!(Some(upgraded_chain_config), chain_config); + } + + #[test] + fn test_upgrade_chain_config_version_03() { + let mut upgrades = std::collections::BTreeMap::new(); + type MySequencerVersions = SequencerVersions; + + let mode = UpgradeMode::View(ViewBasedUpgrade { + start_voting_view: None, + stop_voting_view: None, + start_proposing_view: 1, + stop_proposing_view: 10, + }); + + let upgraded_chain_config = ChainConfig { + max_block_size: 300.into(), + base_fee: 1.into(), + stake_table_contract: Some(Default::default()), + ..Default::default() + }; + + let upgrade_type = UpgradeType::Epoch { + chain_config: upgraded_chain_config, + }; + + upgrades.insert( + ::Upgrade::VERSION, + Upgrade { mode, upgrade_type }, + ); + + let instance_state = NodeState::mock_v2().with_upgrades(upgrades); + + let chain_config = instance_state.upgrade_chain_config(EpochVersion::version()); + assert_eq!(Some(upgraded_chain_config), chain_config); + } +} From 8eaef68fd9724ffff374a5aaca30386cf629af43 Mon Sep 17 00:00:00 2001 From: Abdul Basit <45506001+imabdulbasit@users.noreply.github.com> Date: Mon, 17 Feb 2025 14:26:01 +0500 Subject: [PATCH 067/120] add initial stakers through stake table update txn AND no multi sig ownership transfer (#2580) add stakers through update txn && do not transfer ownership --- .env | 4 +-- data/genesis/demo-pos-base.toml | 2 +- data/genesis/demo-pos.toml | 2 +- data/initial_stake_table.toml | 10 ++++---- process-compose.yaml | 19 ++++++++++++-- sequencer/src/bin/deploy.rs | 25 ------------------- .../bin/update-permissioned-stake-table.rs | 6 ++++- sequencer/src/lib.rs | 2 +- types/src/v0/impls/stake_table.rs | 8 ++++-- utils/src/deployer.rs | 4 +-- utils/src/stake_table.rs | 5 ++++ 11 files changed, 44 insertions(+), 43 deletions(-) diff --git a/.env b/.env index 8688fbabdc..8121b24844 100644 --- a/.env +++ b/.env @@ -57,10 +57,10 @@ ESPRESSO_BUILDER_ETH_ACCOUNT_INDEX=8 ESPRESSO_DEPLOYER_ACCOUNT_INDEX=9 # Contracts -ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0x0c8e79f3534b00d9a3d4a856b665bf4ebc22f2ba +ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS=0xe1da8919f262ee86f9be05059c9280142cf23f48 ESPRESSO_SEQUENCER_LIGHTCLIENT_ADDRESS=$ESPRESSO_SEQUENCER_LIGHT_CLIENT_PROXY_ADDRESS ESPRESSO_SEQUENCER_PERMISSIONED_PROVER=0x14dc79964da2c08b23698b3d3cc7ca32193d9955 -ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS=0x8ce361602b935680e8dec218b820ff5056beb7af +ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS=0xb19b36b1456e65e3a6d514d3f715f204bd59f431 
# Example sequencer demo private keys ESPRESSO_DEMO_SEQUENCER_STAKING_PRIVATE_KEY_0=BLS_SIGNING_KEY~lNDh4Pn-pTAyzyprOAFdXHwhrKhEwqwtMtkD3CZF4x3o diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml index b7a5994ced..8d174799a1 100644 --- a/data/genesis/demo-pos-base.toml +++ b/data/genesis/demo-pos-base.toml @@ -11,7 +11,7 @@ base_fee = '1 wei' fee_recipient = "0x0000000000000000000000000000000000000000" # bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" -stake_table_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" +stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" [header] timestamp = "1970-01-01T00:00:00Z" diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml index 09f60756ab..b18e972383 100644 --- a/data/genesis/demo-pos.toml +++ b/data/genesis/demo-pos.toml @@ -30,4 +30,4 @@ base_fee = '1 wei' fee_recipient = "0x0000000000000000000000000000000000000000" bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" -stake_table_contract = "0x8ce361602b935680e8dec218b820ff5056beb7af" \ No newline at end of file +stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" \ No newline at end of file diff --git a/data/initial_stake_table.toml b/data/initial_stake_table.toml index 59384a6509..57bbdffc68 100644 --- a/data/initial_stake_table.toml +++ b/data/initial_stake_table.toml @@ -1,29 +1,29 @@ -[[public_keys]] +[[new_stakers]] stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" state_ver_key = "SCHNORR_VER_KEY~ibJCbfPOhDoURqiGLe683TDJ_KOLQCx8_Hdq43dOviSuL6WJJ_2mARKO3xA2k5zpXE3iiq4_z7mzvA-V1VXvIWw" da = true stake = 1 -[[public_keys]] +[[new_stakers]] stake_table_key = "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR" state_ver_key = "SCHNORR_VER_KEY~lNCMqH5qLthH5OXxW_Z25tLXJUqmzzhsuQ6oVuaPWhtRPmgIKSqcBoJTaEbmGZL2VfTyQNguaoQL4U_4tCA_HmI" da = true stake = 1 -[[public_keys]] +[[new_stakers]] stake_table_key = "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4" state_ver_key = "SCHNORR_VER_KEY~nkFKzpLhJAafJ3LBkY_0h9OzxSyTu95Z029EUFPO4QNkeUo6DHQGTTVjxmprTA5H8jRSn73i0slJvig6dZ5kLX4" da = true stake = 1 -[[public_keys]] +[[new_stakers]] stake_table_key = "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL" state_ver_key = "SCHNORR_VER_KEY~NwYhzlWarlZHxTNvChWuf74O3fP7zIt5NdC7V8gV6w2W92JOBDkrNmKQeMGxMUke-G5HHxUjHlZEWr1m1xLjEaI" da = false stake = 1 -[[public_keys]] +[[new_stakers]] stake_table_key = "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7" state_ver_key = "SCHNORR_VER_KEY~qMfMj1c1hRVTnugvz3MKNnVC5JA9jvZcV3ZCLL_J4Ap-u0i6ulGWveTk3OOelZj2-kd_WD5ojtYGWV1jHx9wCaA" da = true diff --git a/process-compose.yaml b/process-compose.yaml index 14906a67a8..6699a4fe90 100644 --- a/process-compose.yaml +++ 
b/process-compose.yaml @@ -28,7 +28,9 @@ processes: command: unset ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS ESPRESSO_SEQUENCER_FEE_CONTRACT_PROXY_ADDRESS - && deploy --only fee-contract,permissioned-stake-table + && deploy --only fee-contract + && unset ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS + && deploy --only permissioned-stake-table namespace: setup depends_on: demo-l1-network: @@ -111,6 +113,19 @@ processes: condition: process_healthy deploy-prover-contracts: condition: process_completed + + update-permissioned-stake-table: + command: update-permissioned-stake-table + environment: + - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS + - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_UPDATE_TOML_PATH=data/initial_stake_table.toml + depends_on: + deploy-prover-contracts: + condition: process_completed + sequencer0: + condition: process_healthy + + sequencer0: command: sequencer -- storage-sql -- http -- query -- submit -- hotshot-events -- config @@ -534,7 +549,7 @@ processes: host: localhost port: $ESPRESSO_BUILDER_SERVER_PORT path: /healthcheck - failure_threshold: 5 + failure_threshold: 100 period_seconds: 1 availability: restart: "exit_on_failure" diff --git a/sequencer/src/bin/deploy.rs b/sequencer/src/bin/deploy.rs index 8a5916bd43..d766827cc0 100644 --- a/sequencer/src/bin/deploy.rs +++ b/sequencer/src/bin/deploy.rs @@ -9,7 +9,6 @@ use hotshot_state_prover::service::light_client_genesis; use sequencer_utils::{ deployer::{deploy, ContractGroup, Contracts, DeployedContracts}, logging, - stake_table::PermissionedStakeTableConfig, }; use url::Url; @@ -122,22 +121,6 @@ struct Options { /// If the light client contract is not being deployed, this option is ignored. #[clap(long, env = "ESPRESSO_SEQUENCER_PERMISSIONED_PROVER")] permissioned_prover: Option

<Address>, -
, mut contracts: Contracts, - initial_stake_table: Option>, ) -> anyhow::Result { let provider = Provider::::try_from(l1url.to_string())?.interval(l1_interval); let chain_id = provider.get_chainid().await?.as_u64(); @@ -461,11 +460,10 @@ pub async fn deploy( // `PermissionedStakeTable.sol` if should_deploy(ContractGroup::PermissionedStakeTable, &only) { - let initial_stake_table: Vec<_> = initial_stake_table.unwrap_or_default(); let stake_table_address = contracts .deploy_tx( Contract::PermissonedStakeTable, - PermissionedStakeTable::deploy(l1.clone(), initial_stake_table)?, + PermissionedStakeTable::deploy(l1.clone(), Vec::::new())?, ) .await?; let stake_table = PermissionedStakeTable::new(stake_table_address, l1.clone()); diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 2dd0fd0ef5..7d01517e3e 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -137,12 +137,17 @@ pub async fn update_stake_table( .index(account_index)? .build()? .with_chain_id(chain_id); + let l1 = Arc::new(SignerMiddleware::new(provider.clone(), wallet)); let contract = PermissionedStakeTable::new(contract_address, l1); tracing::info!("sending stake table update transaction"); + if update.stakers_to_remove().is_empty() && update.new_stakers().is_empty() { + anyhow::bail!("No changes to update in the stake table"); + } + let tx_receipt = contract .update(update.stakers_to_remove(), update.new_stakers()) .send() From 35ad5b84fab3df0fde192534665610cb93fb9fe3 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 14:38:16 +0500 Subject: [PATCH 068/120] fix build and clippy --- sequencer/src/run.rs | 17 +++++++---------- types/src/v0/impls/header.rs | 7 ++++--- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 6efe90c484..53aa25291a 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -8,10 +8,7 @@ use super::{ persistence, Genesis, L1Params, NetworkParams, }; use clap::Parser; -use espresso_types::{ - traits::NullEventConsumer, EpochVersion, FeeVersion, MarketplaceVersion, SequencerVersions, - SolverAuctionResultsProvider, V0_0, -}; +use espresso_types::{traits::NullEventConsumer, SequencerVersions, SolverAuctionResultsProvider}; use futures::future::FutureExt; use hotshot::MarketplaceConfig; use hotshot_types::traits::{metrics::NoMetrics, node_implementation::Versions}; @@ -39,34 +36,34 @@ pub async fn main() -> anyhow::Result<()> { match (base, upgrade) { #[cfg(all(feature = "fee", feature = "pos"))] - (FeeVersion::VERSION, EpochVersion::VERSION) => { + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { run( genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await } #[cfg(feature = "pos")] - (EpochVersion::VERSION, _) => { + (espresso_types::EpochVersion::VERSION, _) => { run( genesis, modules, opt, // Specifying V0_0 disables upgrades - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await } // TODO change `fee` to `pos` #[cfg(all(feature = "fee", feature = "marketplace"))] - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { run( genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await } diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index bb7b955ae2..0c54a265dc 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -33,7 +33,7 @@ use 
crate::{ v0_1, v0_2, v0_3, v0_99::{self, ChainConfig, IterableFeeInfo, SolverAuctionResults}, BlockMerkleCommitment, BuilderSignature, FeeAccount, FeeAmount, FeeInfo, FeeMerkleCommitment, - Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, UpgradeType, + Header, L1BlockInfo, L1Snapshot, Leaf2, NamespaceId, NsTable, SeqTypes, }; use super::{instance_state::NodeState, state::ValidatedState}; @@ -974,8 +974,9 @@ impl BlockHeader for Header { let mut validated_state = parent_state.clone(); - let chain_config = instance_state.upgrade_chain_config(version) else { - Header::get_chain_config(&validated_state, instance_state).await? + let chain_config = match instance_state.upgrade_chain_config(version) { + Some(chain_config) => chain_config, + None => Header::get_chain_config(&validated_state, instance_state).await?, }; validated_state.chain_config = chain_config.into(); From 9fee0895ca603e78b9fbf15ab3194b56a6cbe0b6 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 14:57:18 +0500 Subject: [PATCH 069/120] revert justfile --- justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/justfile b/justfile index 2aa0f43e95..84b4493845 100644 --- a/justfile +++ b/justfile @@ -1,4 +1,4 @@ -# export RUST_BACKTRACE := "1" +mod hotshot default: just --list From ac7d8112b6f0b5ccbe73f7c0e79e472aa6b5bed4 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 15:45:25 +0500 Subject: [PATCH 070/120] fix devnode --- sequencer/src/bin/espresso-dev-node.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/sequencer/src/bin/espresso-dev-node.rs b/sequencer/src/bin/espresso-dev-node.rs index 7fed6cec03..c80fb4c72d 100644 --- a/sequencer/src/bin/espresso-dev-node.rs +++ b/sequencer/src/bin/espresso-dev-node.rs @@ -292,7 +292,6 @@ async fn main() -> anyhow::Result<()> { async { Ok(lc_genesis.clone()) }.boxed(), None, contracts.clone(), - None, // initial stake table ) .await?; From b45011d1851c7bd4984f3bc8d1d21d4f3cdd1072 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 16:21:02 +0500 Subject: [PATCH 071/120] do not validate and apply header for block in epoch transition --- sequencer/src/lib.rs | 2 +- types/src/v0/impls/instance_state.rs | 4 ++-- types/src/v0/impls/state.rs | 6 ++++++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 08dc912fa7..d57c4fdc5d 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -483,7 +483,7 @@ pub async fn init_node( node_id: node_index, upgrades: genesis.upgrades, current_version: V::Base::VERSION, - epoch_height: None, + epoch_height: network_config.config.epoch_height, }; // Create the HotShot membership diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index b7f97484ec..afa4d681ca 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -24,7 +24,7 @@ pub struct NodeState { pub genesis_header: GenesisHeader, pub genesis_state: ValidatedState, pub l1_genesis: Option, - pub epoch_height: Option, + pub epoch_height: u64, /// Map containing all planned and executed upgrades. 
/// @@ -65,7 +65,7 @@ impl NodeState { l1_genesis: None, upgrades: Default::default(), current_version, - epoch_height: None, + epoch_height: 0, } } diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index a5ab7cfec0..453f33a796 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -909,6 +909,12 @@ impl HotShotState for ValidatedState { version: Version, view_number: u64, ) -> Result<(Self, Self::Delta), Self::Error> { + if proposed_header.height() % instance.epoch_height == 0 + && parent_leaf.height() == proposed_header.height() + { + return Ok((self.clone(), Delta::default())); + } + // Unwrapping here is okay as we retry in a loop //so we should either get a validated state or until hotshot cancels the task let (validated_state, delta) = self From 103b5e2e607a736d7b121f42b89abd68194219bb Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 16:24:15 +0500 Subject: [PATCH 072/120] add comments to validate_and_apply_header() --- types/src/v0/impls/state.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 453f33a796..aecc11c7a6 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -909,6 +909,9 @@ impl HotShotState for ValidatedState { version: Version, view_number: u64, ) -> Result<(Self, Self::Delta), Self::Error> { + // During epoch transition, hotshot propagates the same block again + // we should totally skip this block, and return the same validated state + // This block will have the same parent block height if proposed_header.height() % instance.epoch_height == 0 && parent_leaf.height() == proposed_header.height() { From cd9c06d69bda04cf67c78b37559dad935ea46361 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 17 Feb 2025 17:22:59 +0500 Subject: [PATCH 073/120] fix log --- types/src/v0/impls/state.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index aecc11c7a6..2efa503442 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -912,9 +912,23 @@ impl HotShotState for ValidatedState { // During epoch transition, hotshot propagates the same block again // we should totally skip this block, and return the same validated state // This block will have the same parent block height + + tracing::info!( + "parent_height={} proposed_height={}", + parent_leaf.height(), + proposed_header.height(), + ); + if proposed_header.height() % instance.epoch_height == 0 && parent_leaf.height() == proposed_header.height() { + tracing::info!( + "skipping block.. 
parent_height={} proposed_height={} epoch_height={}", + parent_leaf.height(), + proposed_header.height(), + instance.epoch_height, + ); + return Ok((self.clone(), Delta::default())); } From 35aaa4728d460eb8f9b9cf17fd07ad36013e44d2 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 02:17:44 +0500 Subject: [PATCH 074/120] todo: l1 events order --- types/src/v0/impls/stake_table.rs | 77 ++++++++++++++++++++++++------- types/src/v0/impls/state.rs | 4 +- 2 files changed, 62 insertions(+), 19 deletions(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index f54925e8b7..f7c5651b5a 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -189,6 +189,8 @@ impl EpochCommittees { }; self.state.insert(epoch, committee.clone()); + self.state.insert(epoch + 1, committee.clone()); + self.state.insert(epoch + 2, committee.clone()); committee } @@ -341,19 +343,25 @@ impl Membership for EpochCommittees { /// Get the stake table for the current view fn stake_table(&self, epoch: Option) -> Vec> { - if let Some(st) = self.state(&epoch) { + let st = if let Some(st) = self.state(&epoch) { st.stake_table.clone() } else { vec![] - } + }; + + tracing::debug!("stake table = {st:?}"); + st } /// Get the stake table for the current view fn da_stake_table(&self, epoch: Option) -> Vec> { - if let Some(sc) = self.state(&epoch) { + let da = if let Some(sc) = self.state(&epoch) { sc.da_members.clone() } else { vec![] - } + }; + + tracing::debug!("da members = {da:?}"); + da } /// Get all members of the committee for the current view @@ -362,11 +370,15 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - if let Some(sc) = self.state(&epoch) { + let committee = if let Some(sc) = self.state(&epoch) { sc.indexed_stake_table.clone().into_keys().collect() } else { BTreeSet::new() - } + }; + + tracing::debug!("committee={committee:?}"); + + committee } /// Get all members of the committee for the current view @@ -375,11 +387,14 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - if let Some(sc) = self.state(&epoch) { + let da = if let Some(sc) = self.state(&epoch) { sc.indexed_da_members.clone().into_keys().collect() } else { BTreeSet::new() - } + }; + tracing::debug!("da committee={da:?}"); + + da } /// Get all eligible leaders of the committee for the current view @@ -388,12 +403,16 @@ impl Membership for EpochCommittees { _view_number: ::View, epoch: Option, ) -> BTreeSet { - self.state(&epoch) + let committee_leaders = self + .state(&epoch) .unwrap() .eligible_leaders .iter() .map(PubKey::public_key) - .collect() + .collect(); + + tracing::debug!("committee_leaders={committee_leaders:?}"); + committee_leaders } /// Get the stake table entry for a public key @@ -436,6 +455,8 @@ impl Membership for EpochCommittees { .eligible_leaders .clone(); + tracing::debug!("lookup_leader() leaders={leaders:?}"); + let index = *view_number as usize % leaders.len(); let res = leaders[index].clone(); Ok(PubKey::public_key(&res)) @@ -490,17 +511,39 @@ impl Membership for EpochCommittees { epoch: Epoch, block_header: Header, ) -> Option> { + + // TODO: (abdul) fix fetching from contracts + // so that order of l1 events match with the update let address = self.contract_address?; + let genesis_st = self.state(&Some(Epoch::genesis())).unwrap().clone(); - self.l1_client + let st = self + .l1_client .get_stake_table(address.to_alloy(), block_header.l1_head()) .await - .ok() - 
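
A note on the epoch-transition skip above (reverted again by a later patch in this series): `proposed_header.height() % instance.epoch_height` divides by `epoch_height`, and these patches fall back to an `epoch_height` of 0 when none is configured, which would panic at the modulo. A guarded predicate makes both the intent and the disabled case explicit. Sketch, with plain integers standing in for the header and leaf types:

fn is_epoch_boundary_replay(
    proposed_height: u64,
    parent_height: u64,
    epoch_height: u64,
) -> bool {
    // epoch_height == 0 means "epochs disabled": never skip, and never
    // divide by zero.
    epoch_height != 0
        && proposed_height % epoch_height == 0
        && parent_height == proposed_height
}
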
.map(|stake_table| -> Box { - Box::new(move |committee: &mut Self| { - let _ = committee.update_stake_table(epoch, stake_table); - }) - }) + .ok(); + + let epoch_0_st = genesis_st.stake_table; + let epoch0_da = genesis_st.da_members; + + let sss = st.clone().unwrap(); + let contract_st = sss.stake_table; + let contract_da = sss.da_members; + + tracing::warn!("epoch0 st= {epoch_0_st:?}"); + tracing::warn!("contract st= {contract_st:?}"); + + tracing::warn!("epoch0 da= {contract_da:?}"); + tracing::warn!("contact da= {epoch0_da:?}"); + + let stake_tables = StakeTables { + stake_table: epoch_0_st.into(), + da_members: epoch0_da.into(), + }; + + Some(Box::new(move |committee: &mut Self| { + let _ = committee.update_stake_table(epoch, stake_tables); + })) } } diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 2efa503442..9876c3ff96 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -913,7 +913,7 @@ impl HotShotState for ValidatedState { // we should totally skip this block, and return the same validated state // This block will have the same parent block height - tracing::info!( + tracing::error!( "parent_height={} proposed_height={}", parent_leaf.height(), proposed_header.height(), @@ -922,7 +922,7 @@ impl HotShotState for ValidatedState { if proposed_header.height() % instance.epoch_height == 0 && parent_leaf.height() == proposed_header.height() { - tracing::info!( + tracing::error!( "skipping block.. parent_height={} proposed_height={} epoch_height={}", parent_leaf.height(), proposed_header.height(), From a2a764c7bffbcd834408c8892f1104f7d173bae9 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 16:48:19 +0500 Subject: [PATCH 075/120] save initial stake table from hotshot config --- flake.nix | 10 ++-------- sequencer/src/lib.rs | 7 +++++++ types/src/v0/impls/stake_table.rs | 1 - types/src/v0/impls/state.rs | 2 +- utils/src/stake_table.rs | 32 ++++++++++++++++++++++++++++++- 5 files changed, 41 insertions(+), 11 deletions(-) diff --git a/flake.nix b/flake.nix index 47b0a13bba..be4c42ecc4 100644 --- a/flake.nix +++ b/flake.nix @@ -67,13 +67,7 @@ solhintPkg { inherit (prev) buildNpmPackage fetchFromGitHub; }; }) - # The mold linker is around 50% faster on Linux than the default linker. - # This overlays a mkShell that is configured to use mold on Linux. 
- (final: prev: prev.lib.optionalAttrs prev.stdenv.isLinux { - mkShell = prev.mkShell.override { - stdenv = prev.stdenvAdapters.useMoldLinker prev.clangStdenv; - }; - }) + ]; pkgs = import nixpkgs { inherit system overlays; }; crossShell = { config }: @@ -323,4 +317,4 @@ ]; }); }); -} +} \ No newline at end of file diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index d57c4fdc5d..79eb77c133 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -23,6 +23,7 @@ use espresso_types::{ use ethers_conv::ToAlloy; use genesis::L1Finalized; use proposal_fetcher::ProposalFetcherConfig; +use sequencer_utils::stake_table::PermissionedStakeTableUpdate; use std::sync::Arc; use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support @@ -494,6 +495,12 @@ pub async fn init_node( network_config.config.epoch_height, ); + // save initial stake table into toml file + // this will be helpful to load it into contract + PermissionedStakeTableUpdate::save_initial_stake_table_from_hotshot_config( + network_config.config.clone(), + )?; + // Initialize the Libp2p network let network = { let p2p_network = Libp2pNetwork::from_config( diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index f7c5651b5a..0ba7f1ddae 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -546,7 +546,6 @@ impl Membership for EpochCommittees { })) } } - #[cfg(test)] mod tests { use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::NodeInfo; diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 9876c3ff96..f192a74274 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -913,7 +913,7 @@ impl HotShotState for ValidatedState { // we should totally skip this block, and return the same validated state // This block will have the same parent block height - tracing::error!( + tracing::debug!( "parent_height={} proposed_height={}", parent_leaf.height(), proposed_header.height(), diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 7d01517e3e..414991f654 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -14,7 +14,7 @@ use ethers::{ }; use hotshot::types::BLSPubKey; use hotshot_contract_adapter::stake_table::{bls_jf_to_sol, NodeInfoJf}; -use hotshot_types::network::PeerConfigKeys; +use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType, HotShotConfig}; use url::Url; use std::{fs, path::Path, sync::Arc, time::Duration}; @@ -104,6 +104,16 @@ impl PermissionedStakeTableUpdate { ) } + pub fn to_toml_file(&self, path: &Path) -> anyhow::Result<()> { + let toml_string = toml::to_string_pretty(self) + .unwrap_or_else(|err| panic!("Failed to serialize config to TOML: {err}")); + + fs::write(path, toml_string) + .unwrap_or_else(|_| panic!("Could not write config file to {}", path.display())); + + Ok(()) + } + fn stakers_to_remove(&self) -> Vec { self.stakers_to_remove .iter() @@ -120,6 +130,26 @@ impl PermissionedStakeTableUpdate { }) .collect() } + + pub fn save_initial_stake_table_from_hotshot_config( + config: HotShotConfig, + ) -> anyhow::Result<()> { + let committee_members = config.known_nodes_with_stake.clone(); + let known_da_nodes = config.known_da_nodes.clone().clone(); + let members = committee_members + .into_iter() + .map(|m| PeerConfigKeys { + stake_table_key: m.stake_table_entry.public_key(), + state_ver_key: m.state_ver_key.clone(), + stake: 
m.stake_table_entry.stake().as_u64(), + da: known_da_nodes.contains(&m), + }) + .collect(); + + Self::new(members, vec![]).to_toml_file(Path::new("data/initial_stake_table.toml"))?; + + Ok(()) + } } pub async fn update_stake_table( From bc7dee590a3ae69d26c932ddca04c0c2745bb272 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 16:56:19 +0500 Subject: [PATCH 076/120] lint --- types/src/v0/impls/stake_table.rs | 1 - utils/src/stake_table.rs | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 0ba7f1ddae..081b4a7c19 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -511,7 +511,6 @@ impl Membership for EpochCommittees { epoch: Epoch, block_header: Header, ) -> Option> { - // TODO: (abdul) fix fetching from contracts // so that order of l1 events match with the update let address = self.contract_address?; diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 414991f654..ca20a8680f 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -14,7 +14,9 @@ use ethers::{ }; use hotshot::types::BLSPubKey; use hotshot_contract_adapter::stake_table::{bls_jf_to_sol, NodeInfoJf}; -use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType, HotShotConfig}; +use hotshot_types::{ + network::PeerConfigKeys, traits::signature_key::StakeTableEntryType, HotShotConfig, +}; use url::Url; use std::{fs, path::Path, sync::Arc, time::Duration}; From c47a77eec697c8b79ce9b0ba1854a3016bc06db0 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 17:05:18 +0500 Subject: [PATCH 077/120] fix env --- .github/workflows/test.yml | 2 +- .gitignore | 5 ++++- client/Cargo.toml | 2 +- data/initial_stake_table.toml | 30 ------------------------------ process-compose.yaml | 3 +-- 5 files changed, 7 insertions(+), 35 deletions(-) delete mode 100644 data/initial_stake_table.toml diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f778253c44..8c93588ff5 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -199,7 +199,7 @@ jobs: compose: "-f process-compose.yaml -D" - version: 03 env: - ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos-base.toml + ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos.toml compose: "-f process-compose.yaml -D" - version: 99 compose: "-f process-compose.yaml -f process-compose-mp.yml -D" diff --git a/.gitignore b/.gitignore index 17dd3e5e9a..0645c7ceda 100644 --- a/.gitignore +++ b/.gitignore @@ -56,4 +56,7 @@ contracts/broadcast/*/11155111/ docs/ # Autogen files -.vscode/ \ No newline at end of file +.vscode/ + +# initial stake table +data/initial_stake_table.toml \ No newline at end of file diff --git a/client/Cargo.toml b/client/Cargo.toml index c38a613844..9fe84cf055 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -10,9 +10,9 @@ anyhow = { workspace = true } espresso-types = { path = "../types" } ethers = { workspace = true } futures = { workspace = true } +hotshot-types = { workspace = true } jf-merkle-tree = { workspace = true } surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } -hotshot-types = { workspace = true } \ No newline at end of file diff --git a/data/initial_stake_table.toml b/data/initial_stake_table.toml deleted file mode 100644 index 57bbdffc68..0000000000 --- a/data/initial_stake_table.toml +++ /dev/null @@ -1,30 +0,0 @@ -[[new_stakers]] 
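
A note on `to_toml_file` above: it returns `anyhow::Result<()>` but panics internally through `unwrap_or_else`, so the `Err` path is unreachable and callers cannot recover. A sketch of the same function propagating errors instead, assuming the same `toml`, `serde`, and `anyhow` dependencies:

use std::{fs, path::Path};

use anyhow::Context;
use serde::Serialize;

fn to_toml_file<T: Serialize>(value: &T, path: &Path) -> anyhow::Result<()> {
    let toml_string =
        toml::to_string_pretty(value).context("failed to serialize config to TOML")?;
    fs::write(path, toml_string)
        .with_context(|| format!("could not write config file to {}", path.display()))?;
    Ok(())
}
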
-stake_table_key = "BLS_VER_KEY~bQszS-QKYvUij2g20VqS8asttGSb95NrTu2PUj0uMh1CBUxNy1FqyPDjZqB29M7ZbjWqj79QkEOWkpga84AmDYUeTuWmy-0P1AdKHD3ehc-dKvei78BDj5USwXPJiDUlCxvYs_9rWYhagaq-5_LXENr78xel17spftNd5MA1Mw5U" -state_ver_key = "SCHNORR_VER_KEY~ibJCbfPOhDoURqiGLe683TDJ_KOLQCx8_Hdq43dOviSuL6WJJ_2mARKO3xA2k5zpXE3iiq4_z7mzvA-V1VXvIWw" -da = true -stake = 1 - -[[new_stakers]] -stake_table_key = "BLS_VER_KEY~4zQnaCOFJ7m95OjxeNls0QOOwWbz4rfxaL3NwmN2zSdnf8t5Nw_dfmMHq05ee8jCegw6Bn5T8inmrnGGAsQJMMWLv77nd7FJziz2ViAbXg-XGGF7o4HyzELCmypDOIYF3X2UWferFE_n72ZX0iQkUhOvYZZ7cfXToXxRTtb_mwRR" -state_ver_key = "SCHNORR_VER_KEY~lNCMqH5qLthH5OXxW_Z25tLXJUqmzzhsuQ6oVuaPWhtRPmgIKSqcBoJTaEbmGZL2VfTyQNguaoQL4U_4tCA_HmI" -da = true -stake = 1 - -[[new_stakers]] -stake_table_key = "BLS_VER_KEY~IBRoz_Q1EXvcm1pNZcmVlyYZU8hZ7qmy337ePAjEMhz8Hl2q8vWPFOd3BaLwgRS1UzAPW3z4E-XIgRDGcRBTAMZX9b_0lKYjlyTlNF2EZfNnKmvv-xJ0yurkfjiveeYEsD2l5d8q_rJJbH1iZdXy-yPEbwI0SIvQfwdlcaKw9po4" -state_ver_key = "SCHNORR_VER_KEY~nkFKzpLhJAafJ3LBkY_0h9OzxSyTu95Z029EUFPO4QNkeUo6DHQGTTVjxmprTA5H8jRSn73i0slJvig6dZ5kLX4" -da = true -stake = 1 - -[[new_stakers]] -stake_table_key = "BLS_VER_KEY~rO2PIjyY30HGfapFcloFe3mNDKMIFi6JlOLkH5ZWBSYoRm5fE2-Rm6Lp3EvmAcB5r7KFJ0c1Uor308x78r04EY_sfjcsDCWt7RSJdL4cJoD_4fSTCv_bisO8k98hs_8BtqQt8BHlPeJohpUXvcfnK8suXJETiJ6Er97pfxRbzgAL" -state_ver_key = "SCHNORR_VER_KEY~NwYhzlWarlZHxTNvChWuf74O3fP7zIt5NdC7V8gV6w2W92JOBDkrNmKQeMGxMUke-G5HHxUjHlZEWr1m1xLjEaI" -da = false -stake = 1 - - -[[new_stakers]] -stake_table_key = "BLS_VER_KEY~r6b-Cwzp-b3czlt0MHmYPJIow5kMsXbrNmZsLSYg9RV49oCCO4WEeCRFR02x9bqLCa_sgNFMrIeNdEa11qNiBAohApYFIvrSa-zP5QGj3xbZaMOCrshxYit6E2TR-XsWvv6gjOrypmugjyTAth-iqQzTboSfmO9DD1-gjJIdCaD7" -state_ver_key = "SCHNORR_VER_KEY~qMfMj1c1hRVTnugvz3MKNnVC5JA9jvZcV3ZCLL_J4Ap-u0i6ulGWveTk3OOelZj2-kd_WD5ojtYGWV1jHx9wCaA" -da = true -stake = 1 \ No newline at end of file diff --git a/process-compose.yaml b/process-compose.yaml index 6699a4fe90..7baa6767c4 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -5,8 +5,7 @@ environment: - ESPRESSO_SEQUENCER_ORCHESTRATOR_URL=http://localhost:$ESPRESSO_ORCHESTRATOR_PORT - ESPRESSO_SEQUENCER_URL=http://localhost:$ESPRESSO_SEQUENCER_API_PORT - ESPRESSO_SEQUENCER_L1_PROVIDER=http://localhost:$ESPRESSO_SEQUENCER_L1_PORT - # - ESPRESSO_SEQUENCER_GENESIS_FILE=$DEMO_GENESIS_FILE - # - ESPRESSO_BUILDER_GENESIS_FILE=$DEMO_GENESIS_FILE + - ESPRESSO_BUILDER_GENESIS_FILE=$ESPRESSO_SEQUENCER_GENESIS_FILE - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=data/initial_stake_table.toml - ESPRESSO_STATE_RELAY_SERVER_URL=http://localhost:$ESPRESSO_STATE_RELAY_SERVER_PORT - QUERY_SERVICE_URI=http://localhost:$ESPRESSO_SEQUENCER1_API_PORT/v0/ From ab1cef68d74cdff477e47d3646a703f586652f75 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 18:02:17 +0500 Subject: [PATCH 078/120] features for sqlite --- justfile | 8 ++++---- sequencer-sqlite/Cargo.toml | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/justfile b/justfile index 84b4493845..fa6b733edb 100644 --- a/justfile +++ b/justfile @@ -20,18 +20,18 @@ lint: cargo clippy --workspace --features testing --all-targets -- -D warnings cargo clippy --workspace --all-targets --manifest-path sequencer-sqlite/Cargo.toml -- -D warnings -build profile="test": +build profile="test" features="": #!/usr/bin/env bash set -euxo pipefail # Use the same target dir for both `build` invocations export CARGO_TARGET_DIR=${CARGO_TARGET_DIR:-target} - cargo build --profile {{profile}} - cargo build --profile {{profile}} 
--manifest-path ./sequencer-sqlite/Cargo.toml + cargo build --profile {{profile}} {{features}} + cargo build --profile {{profile}} --manifest-path ./sequencer-sqlite/Cargo.toml {{features}} demo-native-mp *args: build scripts/demo-native -f process-compose.yaml -f process-compose-mp.yml {{args}} -demo-native-pos *args: build +demo-native-pos *args: (build "test" "--features fee,pos") ESPRESSO_SEQUENCER_GENESIS_FILE=data/genesis/demo-pos.toml scripts/demo-native -f process-compose.yaml {{args}} demo-native-pos-base *args: build diff --git a/sequencer-sqlite/Cargo.toml b/sequencer-sqlite/Cargo.toml index 5c6c1e6d58..189225244f 100644 --- a/sequencer-sqlite/Cargo.toml +++ b/sequencer-sqlite/Cargo.toml @@ -7,7 +7,9 @@ version = "0.1.0" edition = "2021" [features] -default = ["embedded-db"] +fee = ["sequencer/fee"] +pos = ["sequencer/pos"] +default = ["embedded-db", "pos"] sqlite-unbundled = ["sequencer/sqlite-unbundled"] embedded-db = ["sequencer/embedded-db"] From fc990f30677f71f5d02142684880cfcf3a617d68 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 18:20:46 +0500 Subject: [PATCH 079/120] revert add_epoch_root() --- types/src/v0/impls/stake_table.rs | 35 +++++++------------------------ 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 081b4a7c19..990e5b6142 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -511,38 +511,17 @@ impl Membership for EpochCommittees { epoch: Epoch, block_header: Header, ) -> Option> { - // TODO: (abdul) fix fetching from contracts - // so that order of l1 events match with the update let address = self.contract_address?; - let genesis_st = self.state(&Some(Epoch::genesis())).unwrap().clone(); - let st = self - .l1_client + self.l1_client .get_stake_table(address.to_alloy(), block_header.l1_head()) .await - .ok(); - - let epoch_0_st = genesis_st.stake_table; - let epoch0_da = genesis_st.da_members; - - let sss = st.clone().unwrap(); - let contract_st = sss.stake_table; - let contract_da = sss.da_members; - - tracing::warn!("epoch0 st= {epoch_0_st:?}"); - tracing::warn!("contract st= {contract_st:?}"); - - tracing::warn!("epoch0 da= {contract_da:?}"); - tracing::warn!("contact da= {epoch0_da:?}"); - - let stake_tables = StakeTables { - stake_table: epoch_0_st.into(), - da_members: epoch0_da.into(), - }; - - Some(Box::new(move |committee: &mut Self| { - let _ = committee.update_stake_table(epoch, stake_tables); - })) + .ok() + .map(|stake_table| -> Box { + Box::new(move |committee: &mut Self| { + let _ = committee.update_stake_table(epoch, stake_table); + }) + }) } } #[cfg(test)] From 270af288b5de5c4cf1f47a80430c4fbe115d4246 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 18 Feb 2025 22:54:16 +0500 Subject: [PATCH 080/120] add epoch versions to builder main() --- builder/src/bin/permissionless-builder.rs | 28 +++++++++++++------ .../src/bin/marketplace-builder.rs | 25 ++++++++++++----- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 1ff329399e..771e5308eb 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -2,10 +2,7 @@ use std::{num::NonZeroUsize, path::PathBuf, time::Duration}; use builder::non_permissioned::{build_instance_state, BuilderConfig}; use clap::Parser; -use espresso_types::{ - eth_signature_key::EthKeyPair, parse_duration, FeeVersion, 
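
A note on the `add_epoch_root` revert above: the function is back to returning a deferred mutation, an `FnOnce(&mut Self)` boxed up for the caller to apply later, instead of updating the committee state in place. A minimal sketch of that callback shape with a toy type:

struct Committees {
    count: u64,
}

fn add_root(value: u64) -> Option<Box<dyn FnOnce(&mut Committees)>> {
    // Capture the update; the caller decides when (and whether) to apply it.
    Some(Box::new(move |c: &mut Committees| {
        c.count += value;
    }))
}

fn main() {
    let mut committees = Committees { count: 0 };
    if let Some(apply) = add_root(3) {
        apply(&mut committees);
    }
    assert_eq!(committees.count, 3);
}
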
MarketplaceVersion, - SequencerVersions, V0_0, -}; +use espresso_types::{eth_signature_key::EthKeyPair, parse_duration, SequencerVersions}; use futures::future::pending; use hotshot::traits::ValidatedState; use hotshot_types::{ @@ -120,12 +117,25 @@ async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { - run::>(genesis, opt).await + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { + run::>( + genesis, opt + ) + .await + } + (espresso_types::EpochVersion::VERSION, _) => { + run::>( + genesis, opt + // Specifying V0_0 disables upgrades + ) + .await } - (FeeVersion::VERSION, _) => run::>(genesis, opt).await, - (MarketplaceVersion::VERSION, _) => { - run::>(genesis, opt).await + // TODO change `fee` to `pos` + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." diff --git a/marketplace-builder/src/bin/marketplace-builder.rs b/marketplace-builder/src/bin/marketplace-builder.rs index c23cf6a1b3..4091a41dce 100644 --- a/marketplace-builder/src/bin/marketplace-builder.rs +++ b/marketplace-builder/src/bin/marketplace-builder.rs @@ -2,8 +2,7 @@ use std::{num::NonZeroUsize, path::PathBuf, time::Duration}; use clap::Parser; use espresso_types::{ - eth_signature_key::EthKeyPair, parse_duration, FeeAmount, FeeVersion, MarketplaceVersion, - NamespaceId, SequencerVersions, V0_0, + eth_signature_key::EthKeyPair, parse_duration, FeeAmount, NamespaceId, SequencerVersions, }; use futures::future::pending; use hotshot::helpers::initialize_logging; @@ -127,12 +126,24 @@ async fn main() -> anyhow::Result<()> { let upgrade = genesis.upgrade_version; match (base, upgrade) { - (FeeVersion::VERSION, MarketplaceVersion::VERSION) => { - run::>(genesis, opt).await + (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } - (FeeVersion::VERSION, _) => run::>(genesis, opt).await, - (MarketplaceVersion::VERSION, _) => { - run::>(genesis, opt).await + (espresso_types::EpochVersion::VERSION, _) => { + run::>( + genesis, opt + // Specifying V0_0 disables upgrades + ) + .await + } + (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { + run::>( + genesis, opt + ) + .await } _ => panic!( "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file." 
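
A note on the builder `main()` changes above: the same `(base, upgrade)` version dispatch now appears in three binaries (sequencer `run.rs`, the permissionless builder, and the marketplace builder) with matching arms. A reduced sketch of the dispatch shape, using a plain struct in place of the `vbs` version types and the version numbers these patches' genesis files use (fee = 0.2, epoch/PoS = 0.3, marketplace = 0.99):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Version {
    major: u16,
    minor: u16,
}

const FEE: Version = Version { major: 0, minor: 2 };
const EPOCH: Version = Version { major: 0, minor: 3 };
const MARKETPLACE: Version = Version { major: 0, minor: 99 };

fn dispatch(base: Version, upgrade: Version) -> &'static str {
    match (base, upgrade) {
        (FEE, EPOCH) => "fee base, PoS/epoch upgrade",
        // Specifying V0_0 as the upgrade disables further upgrades.
        (EPOCH, _) => "epoch base, no further upgrade",
        (FEE, MARKETPLACE) => "fee base, marketplace upgrade",
        _ => panic!("invalid base ({base:?}) and upgrade ({upgrade:?}) versions"),
    }
}
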
From 3f3ea7c9152b67d8147da0cc18f4651b76644a9c Mon Sep 17 00:00:00 2001 From: lukaszrzasik Date: Tue, 18 Feb 2025 22:29:41 +0100 Subject: [PATCH 081/120] Lr/epoch integration test (#2639) --- types/src/v0/impls/state.rs | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index f192a74274..a5ab7cfec0 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -909,29 +909,6 @@ impl HotShotState for ValidatedState { version: Version, view_number: u64, ) -> Result<(Self, Self::Delta), Self::Error> { - // During epoch transition, hotshot propagates the same block again - // we should totally skip this block, and return the same validated state - // This block will have the same parent block height - - tracing::debug!( - "parent_height={} proposed_height={}", - parent_leaf.height(), - proposed_header.height(), - ); - - if proposed_header.height() % instance.epoch_height == 0 - && parent_leaf.height() == proposed_header.height() - { - tracing::error!( - "skipping block.. parent_height={} proposed_height={} epoch_height={}", - parent_leaf.height(), - proposed_header.height(), - instance.epoch_height, - ); - - return Ok((self.clone(), Delta::default())); - } - // Unwrapping here is okay as we retry in a loop //so we should either get a validated state or until hotshot cancels the task let (validated_state, delta) = self From 389d5ae323b6815a9bf6a4cf189d972871d28b92 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 19 Feb 2025 19:02:18 +0500 Subject: [PATCH 082/120] change view settings for pos upgrade --- data/genesis/demo-pos.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml index b18e972383..3f99ded1e1 100644 --- a/data/genesis/demo-pos.toml +++ b/data/genesis/demo-pos.toml @@ -6,7 +6,7 @@ capacity = 10 [chain_config] chain_id = 999999999 -base_fee = '0 wei' +base_fee = '1 wei' max_block_size = '1mb' fee_recipient = '0x0000000000000000000000000000000000000000' fee_contract = '0xa15bb66138824a1c7167f5e85b957d04dd34e468' @@ -19,8 +19,8 @@ number = 0 [[upgrade]] version = "0.3" -start_proposing_view = 10 -stop_proposing_view = 60 +start_proposing_view = 65 +stop_proposing_view = 120 [upgrade.epoch] [upgrade.epoch.chain_config] From 54d79a3d0aa264e862676c34c94406c0fd37a5b1 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 19 Feb 2025 20:15:10 +0500 Subject: [PATCH 083/120] more fixes --- Cargo.lock | 2 + client/src/lib.rs | 4 +- sequencer-sqlite/Cargo.lock | 1 + sequencer/src/api.rs | 9 ++++ sequencer/src/api/data_source.rs | 2 + sequencer/src/api/endpoints.rs | 8 ++++ tests/Cargo.toml | 3 +- tests/common/mod.rs | 72 ++++++++++++++++++++++++++++++- tests/smoke.rs | 19 +++++++- tests/upgrades.rs | 66 +--------------------------- types/Cargo.toml | 1 + types/src/v0/impls/stake_table.rs | 68 ++++++++++++++--------------- 12 files changed, 150 insertions(+), 105 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da64fa74a5..eb7579c8c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3705,6 +3705,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", + "indexmap 2.7.0", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", @@ -11071,6 +11072,7 @@ dependencies = [ "sequencer-utils", "surf-disco", "tokio", + "tracing", "vbs", ] diff --git a/client/src/lib.rs b/client/src/lib.rs index aee2500074..13a54afce4 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -121,9 +121,9 @@ impl 
SequencerClient { Ok(balance) } - pub async fn current_epoch(&self) -> anyhow::Result { + pub async fn current_epoch(&self) -> anyhow::Result> { self.0 - .get::("node/current_epoch") + .get::>("node/current_epoch") .send() .await .context("getting epoch value") diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 1ce8cf9bba..21ebf4a571 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -3516,6 +3516,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", + "indexmap 2.7.0", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index ec9cca931d..8a59189e88 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -177,6 +177,11 @@ impl, D: Sync, V: Versions, P: SequencerPersistence> ) -> Vec::SignatureKey>> { self.as_ref().get_stake_table_current().await } + + /// Get the stake table for the current epoch if not provided + async fn get_current_epoch(&self) -> Option<::Epoch> { + self.as_ref().get_current_epoch().await + } } impl, V: Versions, P: SequencerPersistence> @@ -205,6 +210,10 @@ impl, V: Versions, P: SequencerPersistence> self.get_stake_table(epoch).await } + + async fn get_current_epoch(&self) -> Option<::Epoch> { + self.consensus().await.read().await.cur_epoch().await + } } impl, V: Versions, P: SequencerPersistence> SubmitDataSource diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index e3703d3874..52754e2548 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -127,6 +127,8 @@ pub(crate) trait StakeTableDataSource { fn get_stake_table_current( &self, ) -> impl Send + Future>>; + + fn get_current_epoch(&self) -> impl Send + Future>; } pub(crate) trait CatchupDataSource: Sync { diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index ed661026e8..b6d7fc35db 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -216,6 +216,14 @@ where .await) } .boxed() + })? 
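
A note on the `current_epoch` change above: it now returns an optional epoch inside `anyhow::Result`, since there is no epoch until the PoS upgrade activates, so callers have to handle the pre-epoch `None`. A polling sketch against the `SequencerClient` from this series (the `u64` epoch type matches how the tests use it):

use std::time::Duration;

async fn wait_for_first_epoch(client: &client::SequencerClient) -> anyhow::Result<u64> {
    loop {
        if let Some(epoch) = client.current_epoch().await? {
            return Ok(epoch);
        }
        // Not in an epoch yet; retry after a short delay.
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}
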
+ .at("current_epoch", |_, state| { + async move { + Ok(state + .read(|state| state.get_current_epoch().boxed()) + .await) + } + .boxed() })?; Ok(api) diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 09ae39b76c..69e0e5bef8 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -17,7 +17,8 @@ espresso-types = { path = "../types", features = ["testing"] } ethers = { workspace = true } futures = { workspace = true } reqwest = { workspace = true, features = ["json"] } +sequencer-utils = { path = "../utils" } surf-disco = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } vbs = { workspace = true } -sequencer-utils = { path = "../utils" } \ No newline at end of file diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 22b206cfbe..cea9bb2904 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,13 +1,19 @@ -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use client::SequencerClient; use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; use ethers::prelude::*; use futures::future::join_all; +use std::path::Path; use std::{fmt, str::FromStr, time::Duration}; use surf_disco::Url; use tokio::time::{sleep, timeout}; use vbs::version::StaticVersionType; +use dotenvy::var; +use sequencer_utils::stake_table::{ + update_stake_table, PermissionedStakeTableUpdate, StakerIdentity, +}; + const L1_PROVIDER_RETRY_INTERVAL: Duration = Duration::from_secs(1); // TODO add to .env const RECIPIENT_ADDRESS: &str = "0x0000000000000000000000000000000000000000"; @@ -277,3 +283,67 @@ async fn wait_for_service(url: Url, interval: u64, timeout_duration: u64) -> Res .await .map_err(|e| anyhow!("Wait for service, timeout: ({}) {}", url, e))? } + +pub async fn test_stake_table_update(clients: Vec) -> Result<()> { + /* + EPOCH V3 + */ + + let l1_port = var("ESPRESSO_SEQUENCER_L1_PORT")?; + let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?; + let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?; + let initial_stake_table_path = var("ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")?; + + let permissioned_stake_table = + PermissionedStakeTableUpdate::from_toml_file(Path::new(&initial_stake_table_path))?; + + // initial stake table has 5 new stakers + + let new_stakers = permissioned_stake_table.new_stakers; + //lets remove one + let staker_removed = new_stakers[0].clone(); + + let st_with_one_removed = PermissionedStakeTableUpdate::new( + vec![], + vec![StakerIdentity { + stake_table_key: staker_removed.stake_table_key.clone(), + }], + ); + let client = clients[0].clone(); + + let epoch_before_update = client.current_epoch().await?.context("curr epoch")?; + tracing::warn!("current_epoch={epoch_before_update:?}"); + update_stake_table( + format!("http://localhost:{l1_port}").parse()?, + Duration::from_secs(7), + "test test test test test test test test test test test junk".to_string(), + account_index.parse()?, + contract_address.parse()?, + st_with_one_removed, + ) + .await?; + + loop { + sleep(Duration::from_secs(10)).await; + let epoch = clients[0].current_epoch().await?.context("curr epoch")?; + tracing::info!("current_epoch={epoch:?}"); + if epoch > epoch_before_update + 6 { + let stake_table = client.stake_table(epoch).await?; + tracing::info!("stake_table={stake_table:?}"); + assert_eq!(stake_table.len(), 4); + + assert!( + stake_table + .iter() + .all(|st| st.stake_key != staker_removed.stake_table_key), + "Entry for {} already exists in the stake table", + 
staker_removed.stake_table_key + ); + + break; + } + } + // TODO: randomize this test + + Ok(()) +} diff --git a/tests/smoke.rs b/tests/smoke.rs index 69ea3be272..7177a90151 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -1,6 +1,7 @@ -use crate::common::TestConfig; -use anyhow::Result; +use crate::common::{test_stake_table_update, TestConfig}; +use anyhow::{Context, Result}; use futures::StreamExt; +use sequencer_utils::test_utils::setup_test; use std::time::Instant; /// We allow for no change in state across this many consecutive iterations. @@ -10,6 +11,7 @@ const MAX_TXNS_NOT_INCREMENTING: u8 = 5; #[tokio::test(flavor = "multi_thread")] async fn test_smoke() -> Result<()> { + setup_test(); let start = Instant::now(); dotenvy::dotenv()?; @@ -78,5 +80,18 @@ async fn test_smoke() -> Result<()> { last = new; } + + let epoch = testing + .espresso + .current_epoch() + .await? + .context("curr epoch")?; + + tracing::info!("epoch before stake table update {epoch:?}"); + + if epoch > 1 { + tracing::info!("testing stake table update"); + test_stake_table_update(testing.sequencer_clients).await?; + } Ok(()) } diff --git a/tests/upgrades.rs b/tests/upgrades.rs index 93e61bf347..d074ee35ac 100644 --- a/tests/upgrades.rs +++ b/tests/upgrades.rs @@ -1,13 +1,8 @@ -use std::{path::Path, time::Duration}; - -use crate::common::TestConfig; +use crate::common::{test_stake_table_update, TestConfig}; use anyhow::Result; use client::SequencerClient; -use dotenvy::var; use espresso_types::{EpochVersion, FeeVersion, MarketplaceVersion}; use futures::{future::join_all, StreamExt}; -use sequencer_utils::stake_table::{update_stake_table, PermissionedStakeTableUpdate}; -use tokio::time::sleep; use vbs::version::{StaticVersionType, Version}; const SEQUENCER_BLOCKS_TIMEOUT: u64 = 200; @@ -108,62 +103,3 @@ async fn test_blocks_production(clients: Vec, from: u64, num: u Ok(()) } - -async fn test_stake_table_update(clients: Vec) -> Result<()> { - /* - EPOCH V3 - */ - - let rpc_url = var("ESPRESSO_SEQUENCER_L1_PROVIDER")?; - let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?; - let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?; - let initial_stake_table_path = var("ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")?; - - let permissioned_stake_table = - PermissionedStakeTableUpdate::from_toml_file(Path::new(&initial_stake_table_path))?; - - // initial stake table has 5 new stakers - - let new_stakers = permissioned_stake_table.new_stakers; - //lets remove one - let staker_removed = new_stakers[0].clone(); - - let st_with_one_removed = - PermissionedStakeTableUpdate::new(vec![staker_removed.clone()], vec![]); - let client = clients[0].clone(); - - let epoch_before_update = client.current_epoch().await?; - - update_stake_table( - rpc_url.parse()?, - Duration::from_secs(7), - "test test test test test test test test test test test junk".to_string(), - account_index.parse()?, - contract_address.parse()?, - st_with_one_removed, - ) - .await?; - - loop { - sleep(Duration::from_secs(10)).await; - let epoch = clients[0].current_epoch().await?; - - if epoch > epoch_before_update { - let stake_table = client.stake_table(epoch).await?; - assert_eq!(stake_table.len(), 4); - - assert!( - stake_table - .iter() - .all(|st| st.stake_key != staker_removed.stake_table_key), - "Entry for {} already exists in the stake table", - staker_removed.stake_table_key - ); - - break; - } - } - // TODO: randomize this test - - Ok(()) -} diff --git a/types/Cargo.toml b/types/Cargo.toml index 
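
A note on `test_stake_table_update` above: the wait loop has no upper bound, so a stalled chain hangs the test instead of failing it, and the `epoch_before_update + 6` margin reads as a timing heuristic for the contract update to propagate into a later epoch's stake table. A bounded variant of the same wait, assuming the `SequencerClient` from this series:

use std::time::Duration;

use anyhow::{bail, Context};

async fn wait_for_epoch_past(
    client: &client::SequencerClient,
    target: u64,
    max_attempts: usize,
) -> anyhow::Result<u64> {
    for _ in 0..max_attempts {
        tokio::time::sleep(Duration::from_secs(10)).await;
        let epoch = client.current_epoch().await?.context("current epoch")?;
        if epoch > target {
            return Ok(epoch);
        }
    }
    bail!("epoch never advanced past {target}")
}
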
40c10d304a..7828de6db7 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -31,6 +31,7 @@ hotshot = { workspace = true } hotshot-contract-adapter = { workspace = true } hotshot-query-service = { workspace = true } hotshot-types = { workspace = true } +indexmap = "2.7" itertools = { workspace = true } jf-merkle-tree = { workspace = true } jf-utils = { workspace = true } # TODO temporary: used only for test_rng() diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 990e5b6142..458e5bdcbb 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -19,7 +19,7 @@ use hotshot_types::{ }, PeerConfig, }; -use itertools::Itertools; +use indexmap::IndexMap; use std::{ cmp::max, collections::{BTreeSet, HashMap}, @@ -49,43 +49,36 @@ impl StakeTables { /// should not significantly affect performance to fetch all events and /// perform the computation in this functions once per epoch. pub fn from_l1_events(updates: Vec) -> Self { - let changes_per_node = updates - .into_iter() - .flat_map(|event| { - event - .removed - .into_iter() - .map(|key| StakeTableChange::Remove(bls_alloy_to_jf(key))) - .chain( - event - .added - .into_iter() - .map(|node_info| StakeTableChange::Add(node_info.into())), - ) - }) - .group_by(|change| change.key()); + let mut index_map = IndexMap::new(); - // If the last event for a stakers is `Added` the staker is currently - // staking, if the last event is removed or (or the staker is not present) - // they are not staking. - let currently_staking = changes_per_node - .into_iter() - .map(|(_pub_key, deltas)| deltas.last().expect("deltas non-empty").clone()) - .filter_map(|change| match change { - StakeTableChange::Add(node_info) => Some(node_info), - StakeTableChange::Remove(_) => None, - }); - - let mut consensus_stake_table: Vec> = vec![]; - let mut da_members: Vec> = vec![]; - for node in currently_staking { - consensus_stake_table.push(node.clone().into()); - if node.da { - da_members.push(node.into()); + for event in updates { + for key in event.removed { + let change = StakeTableChange::Remove(bls_alloy_to_jf(key)); + index_map.insert(change.key(), change); + } + for node_info in event.added { + let change = StakeTableChange::Add(node_info.into()); + index_map.insert(change.key(), change); } } - Self::new(consensus_stake_table.into(), da_members.into()) + let mut da_members = Vec::new(); + let mut stake_table = Vec::new(); + + for change in index_map.values() { + if let StakeTableChange::Add(node_info_jf) = change { + let entry: StakeTableEntry = node_info_jf.clone().into(); + stake_table.push(entry.clone()); + if change.is_da() { + da_members.push(entry); + } + } + } + + tracing::error!("DA={da_members:?}"); + tracing::error!("ST={stake_table:?}"); + + Self::new(stake_table.into(), da_members.into()) } } @@ -121,6 +114,13 @@ impl StakeTableChange { StakeTableChange::Remove(key) => *key, } } + + pub(crate) fn is_da(&self) -> bool { + match self { + StakeTableChange::Add(node_info) => node_info.da, + StakeTableChange::Remove(_) => false, + } + } } /// Holds Stake table and da stake From ced65a19c4e05aa858968cfe6ad8d8ff9301e877 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 19 Feb 2025 21:24:03 +0500 Subject: [PATCH 084/120] remove bid_recipient from config --- data/genesis/demo-pos.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml index 3f99ded1e1..db0e4db50c 100644 --- a/data/genesis/demo-pos.toml +++ b/data/genesis/demo-pos.toml @@ 
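
A note on the `from_l1_events` rewrite above: replacing the `group_by` pipeline with an `IndexMap` keyed by staker key makes the "last event for a key wins" rule explicit while keeping deterministic insertion order. (The `tracing::error!("DA=...")` and `("ST=...")` lines look like leftover debug logging; `tracing::debug!` would match the surrounding code.) A reduced sketch of the same fold, with string keys standing in for BLS keys:

use indexmap::IndexMap;

#[derive(Clone, Debug)]
enum Change {
    Add { key: String, da: bool },
    Remove { key: String },
}

fn current_stakers(events: Vec<Vec<Change>>) -> (Vec<String>, Vec<String>) {
    let mut latest: IndexMap<String, Change> = IndexMap::new();
    for event in events {
        for change in event {
            let key = match &change {
                Change::Add { key, .. } | Change::Remove { key } => key.clone(),
            };
            // A later event for the same key overwrites the earlier one.
            latest.insert(key, change);
        }
    }

    let mut stake_table = Vec::new();
    let mut da_members = Vec::new();
    for change in latest.values() {
        // Only keys whose final event is an `Add` are currently staking.
        if let Change::Add { key, da } = change {
            stake_table.push(key.clone());
            if *da {
                da_members.push(key.clone());
            }
        }
    }
    (stake_table, da_members)
}
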
-28,6 +28,5 @@ chain_id = 999999999 max_block_size = '1mb' base_fee = '1 wei' fee_recipient = "0x0000000000000000000000000000000000000000" -bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" \ No newline at end of file From 88d4a2e6d11df406c07cc816dcd086c2b9256f0d Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 19 Feb 2025 22:16:58 +0500 Subject: [PATCH 085/120] preload epoch staketables --- types/src/v0/impls/stake_table.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 458e5bdcbb..adeb989967 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -189,8 +189,10 @@ impl EpochCommittees { }; self.state.insert(epoch, committee.clone()); - self.state.insert(epoch + 1, committee.clone()); - self.state.insert(epoch + 2, committee.clone()); + for i in epoch.u64()..=epoch.u64() + 15 { + self.state.insert(EpochNumber::new(i + 1), committee.clone()); + } + committee } @@ -246,7 +248,7 @@ impl EpochCommittees { // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); - for epoch in Epoch::genesis().u64()..=10 { + for epoch in Epoch::genesis().u64()..=50 { map.insert(Epoch::new(epoch), members.clone()); } From e3050130e99593da754abbdde6813787c3c2e6e6 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Thu, 20 Feb 2025 17:05:17 +0500 Subject: [PATCH 086/120] get contract address from chain config and do catchup --- data/genesis/demo-pos.toml | 4 +- sequencer/src/api/endpoints.rs | 7 +- types/src/v0/impls/stake_table.rs | 123 +++++++++++++----------------- 3 files changed, 55 insertions(+), 79 deletions(-) diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml index db0e4db50c..cf0b37596a 100644 --- a/data/genesis/demo-pos.toml +++ b/data/genesis/demo-pos.toml @@ -19,8 +19,8 @@ number = 0 [[upgrade]] version = "0.3" -start_proposing_view = 65 -stop_proposing_view = 120 +start_proposing_view = 10 +stop_proposing_view = 50 [upgrade.epoch] [upgrade.epoch.chain_config] diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index b6d7fc35db..aac4d4f281 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -218,12 +218,7 @@ where .boxed() })? 
.at("current_epoch", |_, state| { - async move { - Ok(state - .read(|state| state.get_current_epoch().boxed()) - .await) - } - .boxed() + async move { Ok(state.read(|state| state.get_current_epoch().boxed()).await) }.boxed() })?; Ok(api) diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index adeb989967..d1b7f7c585 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -1,11 +1,14 @@ use super::{ + traits::StateCatchup, v0_3::{DAMembers, StakeTable, StakeTables}, + v0_99::ChainConfig, Header, L1Client, NodeState, PubKey, SeqTypes, }; use async_trait::async_trait; +use committable::Committable; use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::StakersUpdated; -use ethers::types::{Address, U256}; +use ethers::types::U256; use ethers_conv::ToAlloy; use hotshot::types::{BLSPubKey, SignatureKey as _}; use hotshot_contract_adapter::stake_table::{bls_alloy_to_jf, NodeInfoJf}; @@ -24,12 +27,10 @@ use std::{ cmp::max, collections::{BTreeSet, HashMap}, num::NonZeroU64, - str::FromStr, + sync::Arc, }; use thiserror::Error; -use url::Url; - type Epoch = ::Epoch; impl StakeTables { @@ -82,7 +83,7 @@ impl StakeTables { } } -#[derive(Clone, Debug)] +#[derive(derive_more::Debug, Clone)] /// Type to describe DA and Stake memberships pub struct EpochCommittees { /// Committee used when we're in pre-epoch state @@ -97,8 +98,9 @@ pub struct EpochCommittees { /// L1 provider l1_client: L1Client, - /// Address of Stake Table Contract - contract_address: Option
, + chain_config: ChainConfig, + #[debug("{}", peers.name())] + pub peers: Arc, } #[derive(Debug, Clone, PartialEq)] @@ -189,9 +191,8 @@ impl EpochCommittees { }; self.state.insert(epoch, committee.clone()); - for i in epoch.u64()..=epoch.u64() + 15 { - self.state.insert(EpochNumber::new(i + 1), committee.clone()); - } + self.state.insert(epoch + 1, committee.clone()); + self.state.insert(epoch + 2, committee.clone()); committee } @@ -252,14 +253,13 @@ impl EpochCommittees { map.insert(Epoch::new(epoch), members.clone()); } - let address = instance_state.chain_config.stake_table_contract; - Self { non_epoch_committee: members, state: map, _epoch_size: epoch_size, l1_client: instance_state.l1_client.clone(), - contract_address: address, + chain_config: instance_state.chain_config, + peers: instance_state.peers.clone(), } } @@ -284,65 +284,11 @@ impl Membership for EpochCommittees { fn new( // TODO remove `new` from trait and remove this fn as well. // https://github.com/EspressoSystems/HotShot/commit/fcb7d54a4443e29d643b3bbc53761856aef4de8b - committee_members: Vec>, - da_members: Vec>, + _committee_members: Vec>, + _da_members: Vec>, ) -> Self { - // For each eligible leader, get the stake table entry - let eligible_leaders: Vec<_> = committee_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // For each member, get the stake table entry - let stake_table: Vec<_> = committee_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // For each member, get the stake table entry - let da_members: Vec<_> = da_members - .iter() - .map(|member| member.stake_table_entry.clone()) - .filter(|entry| entry.stake() > U256::zero()) - .collect(); - - // Index the stake table by public key - let indexed_stake_table: HashMap = stake_table - .iter() - .map(|entry| (PubKey::public_key(entry), entry.clone())) - .collect(); - - // Index the stake table by public key - let indexed_da_members: HashMap = da_members - .iter() - .map(|entry| (PubKey::public_key(entry), entry.clone())) - .collect(); - - let members = Committee { - eligible_leaders, - stake_table, - da_members, - indexed_stake_table, - indexed_da_members, - }; - - let mut map = HashMap::new(); - map.insert(Epoch::genesis(), members.clone()); - // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 - map.insert(Epoch::genesis() + 1u64, members.clone()); - - Self { - non_epoch_committee: members, - state: map, - _epoch_size: 12, - l1_client: L1Client::new(vec![Url::from_str("http:://ab.b").unwrap()]) - .expect("Failed to create L1 client"), - contract_address: None, - } + panic!("EpochCommittees::new() called. 
This function has been replaced with new_stake()"); } - /// Get the stake table for the current view fn stake_table(&self, epoch: Option) -> Vec> { let st = if let Some(st) = self.state(&epoch) { @@ -513,7 +459,17 @@ impl Membership for EpochCommittees { epoch: Epoch, block_header: Header, ) -> Option> { - let address = self.contract_address?; + let chain_config = get_chain_config(self.chain_config, &self.peers, &block_header) + .await + .ok()?; + + let contract_address = chain_config.stake_table_contract; + + if contract_address.is_none() { + tracing::error!("No stake table contract address found in Chain config"); + } + + let address = contract_address?; self.l1_client .get_stake_table(address.to_alloy(), block_header.l1_head()) @@ -526,6 +482,31 @@ impl Membership for EpochCommittees { }) } } + +pub(crate) async fn get_chain_config( + chain_config: ChainConfig, + peers: &impl StateCatchup, + header: &Header, +) -> anyhow::Result { + let header_cf = header.chain_config(); + if chain_config.commit() == header_cf.commit() { + return Ok(chain_config); + } + + let cf = match header_cf.resolve() { + Some(cf) => cf, + None => peers + .fetch_chain_config(header_cf.commit()) + .await + .map_err(|err| { + tracing::error!("failed to get chain_config from peers. err: {err:?}"); + err + })?, + }; + + Ok(cf) +} + #[cfg(test)] mod tests { use contract_bindings_alloy::permissionedstaketable::PermissionedStakeTable::NodeInfo; From 727e7513e92dcbb3ce9391ad32b0646a4e498a45 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 21 Feb 2025 02:26:32 +0500 Subject: [PATCH 087/120] lockfile --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9889d47bb1..62c6703bcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5296,7 +5296,7 @@ dependencies = [ "refinery", "refinery-core", "reqwest 0.12.12", - "semver 1.0.24", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -9992,7 +9992,7 @@ dependencies = [ "rand_chacha 0.3.1", "rand_distr", "reqwest 0.12.12", - "semver 1.0.24", + "semver 1.0.25", "sequencer", "sequencer-utils", "serde", From ff9ed3a1067b0d50d59f6067e2aa58d6533a1087 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 21 Feb 2025 02:51:11 +0500 Subject: [PATCH 088/120] clippy --- hotshot-query-service/src/data_source/fetching.rs | 2 +- hotshot-query-service/src/data_source/fetching/leaf.rs | 2 +- sequencer/src/api/sql.rs | 1 - sequencer/src/persistence/fs.rs | 2 +- sequencer/src/persistence/sql.rs | 2 +- 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index 9042a9e62b..d6a7ce8012 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -1751,7 +1751,7 @@ impl Heights { } fn might_exist(self, h: u64) -> bool { - h < self.height && self.pruned_height.map_or(true, |ph| h > ph) + h < self.height && self.pruned_height.is_none_or(|ph| h > ph) } } diff --git a/hotshot-query-service/src/data_source/fetching/leaf.rs b/hotshot-query-service/src/data_source/fetching/leaf.rs index 0e1a02b7cd..3692d01851 100644 --- a/hotshot-query-service/src/data_source/fetching/leaf.rs +++ b/hotshot-query-service/src/data_source/fetching/leaf.rs @@ -249,7 +249,7 @@ pub(super) fn trigger_fetch_for_parent( Ok(mut tx) => { // Don't bother fetching a pruned leaf. 
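
A note on the chain-config lookup introduced above: the stake-table contract address now comes from the header's `ChainConfig`, resolved locally when the commitment matches and fetched from peers otherwise, which is what lets a catching-up node find the contract without trusting stale local state. A sketch of that resolve-or-fetch shape, with plain integers standing in for commitments (the types here are hypothetical):

#[derive(Clone)]
struct Config {
    commit: u64,
    stake_table_contract: Option<String>,
}

async fn resolve_config(
    local: Config,
    header_commit: u64,
    fetch: impl std::future::Future<Output = anyhow::Result<Config>>,
) -> anyhow::Result<Config> {
    if local.commit == header_commit {
        // Fast path: the header references the config we already hold.
        return Ok(local);
    }
    // Slow path: catch up from peers by commitment.
    fetch.await
}
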
if let Ok(pruned_height) = tx.load_pruned_height().await { - if !pruned_height.map_or(true, |ph| height > ph) { + if pruned_height.is_some_and(|ph| height <= ph) { tracing::info!( height, ?pruned_height, diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index f08bd52eb2..0976dc7240 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -202,7 +202,6 @@ impl ChainConfigPersistence for Transaction { [(commitment.to_string(), data)], ) .await - .map_err(Into::into) } } diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index bc01469a8c..f6b1946511 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -367,7 +367,7 @@ impl Inner { let info = LeafInfo { leaf, - vid_share: vid_share.map(Into::into), + vid_share, // Note: the following fields are not used in Decide event processing, and should be // removed. For now, we just default them. diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index 926583fa4f..0bf457457d 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -767,7 +767,7 @@ impl Persistence { LeafInfo { leaf, - vid_share: vid_share.map(Into::into), + vid_share, // Note: the following fields are not used in Decide event processing, and // should be removed. For now, we just default them. state: Default::default(), From ab54402a373e54c777b3f6e47f8ce92f2a53d2d9 Mon Sep 17 00:00:00 2001 From: Abdul Basit Date: Fri, 21 Feb 2025 03:19:19 +0500 Subject: [PATCH 089/120] sqlite lockfile --- sequencer-sqlite/Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index 740153217c..b0e8976e91 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -4986,7 +4986,7 @@ dependencies = [ "prometheus", "refinery", "refinery-core", - "semver 1.0.24", + "semver 1.0.25", "serde", "serde_json", "snafu 0.8.5", @@ -9408,7 +9408,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_distr", - "semver 1.0.24", + "semver 1.0.25", "sequencer-utils", "serde", "serde_json", @@ -12504,9 +12504,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", From e6558a41134df368fd33954869e1975af6f0b882 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 24 Feb 2025 15:33:16 +0500 Subject: [PATCH 090/120] set epoch height in genesis file --- data/genesis/demo-pos-base.toml | 2 +- data/genesis/demo-pos.toml | 1 + data/v3/messages.bin | Bin 7404 -> 7405 bytes data/v3/messages.json | 3 +- sequencer/src/api.rs | 63 ++++++++++++++++---------------- sequencer/src/genesis.rs | 1 + sequencer/src/lib.rs | 5 ++- sequencer/src/restart_tests.rs | 1 + sequencer/src/run.rs | 1 + 9 files changed, 41 insertions(+), 36 deletions(-) diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml index 8d174799a1..d909148ef6 100644 --- a/data/genesis/demo-pos-base.toml +++ b/data/genesis/demo-pos-base.toml @@ -1,5 +1,6 @@ base_version = "0.3" upgrade_version = "0.3" +epoch_height = 10 [stake_table] capacity = 10 @@ -9,7 +10,6 @@ chain_id = 999999999 max_block_size = '1mb' base_fee = '1 wei' fee_recipient = 
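
A note on the clippy fixes above: `Option::is_none_or` (stable since Rust 1.82) and `is_some_and` (stable since 1.70) replace the `map_or(true, ..)` and `!map_or(true, ..)` forms, with the negation flipping the predicate from `h > ph` to `h <= ph`. A quick equivalence check:

fn main() {
    for pruned_height in [None, Some(5u64)] {
        for h in 0..10u64 {
            // `might_exist`: no pruned height recorded, or strictly above it.
            assert_eq!(
                pruned_height.map_or(true, |ph| h > ph),
                pruned_height.is_none_or(|ph| h > ph),
            );
            // The negated form used in the leaf fetcher.
            assert_eq!(
                !pruned_height.map_or(true, |ph| h > ph),
                pruned_height.is_some_and(|ph| h <= ph),
            );
        }
    }
}
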
"0x0000000000000000000000000000000000000000" -# bid_recipient = "0x0000000000000000000000000000000000000000" fee_contract = "0xa15bb66138824a1c7167f5e85b957d04dd34e468" stake_table_contract = "0xb19b36b1456e65e3a6d514d3f715f204bd59f431" diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml index cf0b37596a..6414cab0d2 100644 --- a/data/genesis/demo-pos.toml +++ b/data/genesis/demo-pos.toml @@ -1,5 +1,6 @@ base_version = "0.2" upgrade_version = "0.3" +epoch_height = 10 [stake_table] capacity = 10 diff --git a/data/v3/messages.bin b/data/v3/messages.bin index b07e615a29611c080714f53adc33395bd6aefc38..ba47afc646765941eada03bcf3a1038dc049a5bc 100644 GIT binary patch delta 20 ccmaE3`POoSDdXhl60(zziz{v}Vyuz|0ATY7E&u=k delta 20 ccmaEB`NndCDdXgq5;Budh%0U`W~`D10AUXZF#rGn diff --git a/data/v3/messages.json b/data/v3/messages.json index fcf77f9592..eed2d4964f 100644 --- a/data/v3/messages.json +++ b/data/v3/messages.json @@ -18,7 +18,8 @@ "chain_id": "35353", "fee_contract": null, "fee_recipient": "0x0000000000000000000000000000000000000000", - "max_block_size": "30720" + "max_block_size": "30720", + "stake_table_contract": null } } }, diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 8a59189e88..208e41b8f5 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -1576,9 +1576,8 @@ mod test { use espresso_types::{ traits::NullEventConsumer, v0_1::{UpgradeMode, ViewBasedUpgrade}, - BackoffParams, FeeAccount, FeeAmount, FeeVersion, Header, MarketplaceVersion, - MockSequencerVersions, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, - UpgradeType, ValidatedState, + BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions, + SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState, }; use ethers::utils::Anvil; use futures::{ @@ -2138,35 +2137,35 @@ mod test { handle.abort(); } - #[tokio::test(flavor = "multi_thread")] - async fn test_pos_upgrade_view_based() { - setup_test(); - - let mut upgrades = std::collections::BTreeMap::new(); - type MySequencerVersions = SequencerVersions; - - let mode = UpgradeMode::View(ViewBasedUpgrade { - start_voting_view: None, - stop_voting_view: None, - start_proposing_view: 1, - stop_proposing_view: 10, - }); - - let upgrade_type = UpgradeType::Epoch { - chain_config: ChainConfig { - max_block_size: 500.into(), - base_fee: 2.into(), - stake_table_contract: Some(Default::default()), - ..Default::default() - }, - }; - - upgrades.insert( - ::Upgrade::VERSION, - Upgrade { mode, upgrade_type }, - ); - test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; - } + // #[tokio::test(flavor = "multi_thread")] + // async fn test_pos_upgrade_view_based() { + // setup_test(); + + // let mut upgrades = std::collections::BTreeMap::new(); + // type MySequencerVersions = SequencerVersions; + + // let mode = UpgradeMode::View(ViewBasedUpgrade { + // start_voting_view: None, + // stop_voting_view: None, + // start_proposing_view: 1, + // stop_proposing_view: 10, + // }); + + // let upgrade_type = UpgradeType::Epoch { + // chain_config: ChainConfig { + // max_block_size: 500.into(), + // base_fee: 2.into(), + // stake_table_contract: Some(Default::default()), + // ..Default::default() + // }, + // }; + + // upgrades.insert( + // ::Upgrade::VERSION, + // Upgrade { mode, upgrade_type }, + // ); + // test_upgrade_helper::(upgrades, MySequencerVersions::new()).await; + // } #[tokio::test(flavor = "multi_thread")] async fn test_marketplace_upgrade_view_based() { diff --git 
a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index b1ddf0838e..790a8d6193 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -53,6 +53,7 @@ pub struct Genesis { pub base_version: Version, #[serde(with = "version_ser")] pub upgrade_version: Version, + pub epoch_height: Option, pub chain_config: ChainConfig, pub stake_table: StakeTableConfig, #[serde(default)] diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 79eb77c133..5f8c6553fd 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -363,8 +363,9 @@ pub async fn init_node( upgrade.set_hotshot_config_parameters(&mut network_config.config); } - //todo(abdul): get from genesis file - network_config.config.epoch_height = 10; + let epoch_height = genesis.epoch_height.unwrap_or_default(); + tracing::info!("setting epoch height={epoch_height:?}"); + network_config.config.epoch_height = epoch_height; // If the `Libp2p` bootstrap nodes were supplied via the command line, override those // present in the config file. diff --git a/sequencer/src/restart_tests.rs b/sequencer/src/restart_tests.rs index 7631120994..4c6c5a82c5 100755 --- a/sequencer/src/restart_tests.rs +++ b/sequencer/src/restart_tests.rs @@ -543,6 +543,7 @@ impl TestNetwork { upgrades: Default::default(), base_version: Version { major: 0, minor: 1 }, upgrade_version: Version { major: 0, minor: 2 }, + epoch_height: None, // Start with a funded account, so we can test catchup after restart. accounts: [(builder_account(), 1000000000.into())] diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 53aa25291a..568716a268 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -293,6 +293,7 @@ mod test { upgrades: Default::default(), base_version: Version { major: 0, minor: 1 }, upgrade_version: Version { major: 0, minor: 2 }, + epoch_height: None, }; genesis.to_file(&genesis_file).unwrap(); From c1b1e223915d0beaf04ac5ff4b487460cdb84eda Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 24 Feb 2025 15:58:43 +0500 Subject: [PATCH 091/120] lockfile --- Cargo.lock | 2 +- sequencer-sqlite/Cargo.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2eb50a70f..9afec16fa3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3727,7 +3727,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", - "indexmap 2.7.0", + "indexmap 2.7.1", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock index fc7b0bbacb..c5ca0e1b2f 100644 --- a/sequencer-sqlite/Cargo.lock +++ b/sequencer-sqlite/Cargo.lock @@ -3553,7 +3553,7 @@ dependencies = [ "hotshot-contract-adapter", "hotshot-query-service", "hotshot-types", - "indexmap 2.7.0", + "indexmap 2.7.1", "itertools 0.12.1", "jf-merkle-tree", "jf-utils", From e69b5f2c125f73ead5788c004ad724ee705c3c4f Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 24 Feb 2025 20:17:47 +0500 Subject: [PATCH 092/120] load initial stake table from config --- Cargo.toml | 8 +++ client/src/lib.rs | 19 ++++- flake.nix | 8 ++- hotshot-types/src/data.rs | 2 +- sequencer/api/node.toml | 9 +++ sequencer/src/api.rs | 38 ++++++++++ sequencer/src/api/data_source.rs | 9 +++ sequencer/src/api/endpoints.rs | 26 +++++++ .../bin/update-permissioned-stake-table.rs | 71 +++++++++++++++++-- sequencer/src/lib.rs | 7 -- utils/src/stake_table.rs | 24 +------ 11 files changed, 182 insertions(+), 39 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7c7d9278b3..b80c04588a 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -224,6 +224,14 @@ paste = "1.0" rand = "0.8.5" time = "0.3" trait-set = "0.3.0" + +[profile.dev] +# No optimizations +opt-level = 0 +# Skip compiling the debug information. +debug = false +# Skip linking symbols. +strip = true [profile.test] opt-level = 1 [profile.test.package.tests] diff --git a/client/src/lib.rs b/client/src/lib.rs index 13a54afce4..703fe639e4 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -10,6 +10,7 @@ use jf_merkle_tree::{ use std::time::Duration; use surf_disco::{ error::ClientError, + http::convert::DeserializeOwned, socket::{Connection, Unsupported}, Url, }; @@ -134,7 +135,23 @@ impl SequencerClient { .get::<_>(&format!("node/stake-table/{epoch}")) .send() .await - .context("getting epoch value") + .context("getting stake table") + } + + pub async fn da_members(&self, epoch: u64) -> anyhow::Result>> { + self.0 + .get::<_>(&format!("node/stake-table/da/{epoch}")) + .send() + .await + .context("getting da stake table") + } + + pub async fn config(&self) -> anyhow::Result { + self.0 + .get::(&format!("config/hotshot")) + .send() + .await + .context("getting hotshot config") } } diff --git a/flake.nix b/flake.nix index be4c42ecc4..c4e74ad5fe 100644 --- a/flake.nix +++ b/flake.nix @@ -67,7 +67,13 @@ solhintPkg { inherit (prev) buildNpmPackage fetchFromGitHub; }; }) - + # The mold linker is around 50% faster on Linux than the default linker. + # This overlays a mkShell that is configured to use mold on Linux. + (final: prev: prev.lib.optionalAttrs prev.stdenv.isLinux { + mkShell = prev.mkShell.override { + stdenv = prev.stdenvAdapters.useMoldLinker prev.clangStdenv; + }; + }) ]; pkgs = import nixpkgs { inherit system overlays; }; crossShell = { config }: diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index ed8e7ceba2..1227b43fc5 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -138,7 +138,7 @@ impl_u64_wrapper!(EpochNumber); impl EpochNumber { /// Create a genesis number (1) #[allow(dead_code)] - fn genesis() -> Self { + pub fn genesis() -> Self { Self(1) } } diff --git a/sequencer/api/node.toml b/sequencer/api/node.toml index d2025500f7..1ad3d4a05c 100644 --- a/sequencer/api/node.toml +++ b/sequencer/api/node.toml @@ -2,11 +2,20 @@ PATH = ["stake-table/current"] DOC = "Get the stake table for the current epoch" +[route.da_members_current] +PATH = ["stake-table/da/current"] +DOC = "Get the stake table da members for the current epoch" + [route.stake_table] PATH = ["stake-table/:epoch_number"] ":epoch_number" = "Integer" DOC = "Get the stake table for the given epoch" +[route.da_members] +PATH = ["stake-table/da/:epoch_number"] +":epoch_number" = "Integer" +DOC = "Get the stake table da members for the given epoch" + [route.current_epoch] PATH = ["current_epoch"] DOC = "Get the current epoch" \ No newline at end of file diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 208e41b8f5..4c26e3a660 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -171,6 +171,14 @@ impl, D: Sync, V: Versions, P: SequencerPersistence> self.as_ref().get_stake_table(epoch).await } + /// Get the stake table for a given epoch + async fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> Vec::SignatureKey>> { + self.as_ref().get_da_members(epoch).await + } + /// Get the stake table for the current epoch if not provided async fn get_stake_table_current( &self, @@ -178,6 +186,13 @@ impl, D: Sync, V: Versions, P: SequencerPersistence> self.as_ref().get_stake_table_current().await } + 
/// Get the stake table for the current epoch if not provided + async fn get_da_members_current( + &self, + ) -> Vec::SignatureKey>> { + self.as_ref().get_da_members_current().await + } + /// Get the stake table for the current epoch if not provided async fn get_current_epoch(&self) -> Option<::Epoch> { self.as_ref().get_current_epoch().await @@ -214,6 +229,29 @@ impl, V: Versions, P: SequencerPersistence> async fn get_current_epoch(&self) -> Option<::Epoch> { self.consensus().await.read().await.cur_epoch().await } + + async fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> Vec::SignatureKey>> { + self.consensus() + .await + .read() + .await + .memberships + .read() + .await + .da_stake_table(epoch) + } + + /// Get the stake table for the current epoch if not provided + async fn get_da_members_current( + &self, + ) -> Vec::SignatureKey>> { + let epoch = self.consensus().await.read().await.cur_epoch().await; + + self.get_da_members(epoch).await + } } impl, V: Versions, P: SequencerPersistence> SubmitDataSource diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index 52754e2548..a3372a754b 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -129,6 +129,15 @@ pub(crate) trait StakeTableDataSource { ) -> impl Send + Future>>; fn get_current_epoch(&self) -> impl Send + Future>; + + fn get_da_members( + &self, + epoch: Option<::Epoch>, + ) -> impl Send + Future::SignatureKey>>>; + /// Get the stake table for the current epoch if not provided + fn get_da_members_current( + &self, + ) -> impl Send + Future::SignatureKey>>>; } pub(crate) trait CatchupDataSource: Sync { diff --git a/sequencer/src/api/endpoints.rs b/sequencer/src/api/endpoints.rs index aac4d4f281..95c4971030 100644 --- a/sequencer/src/api/endpoints.rs +++ b/sequencer/src/api/endpoints.rs @@ -219,6 +219,32 @@ where })? .at("current_epoch", |_, state| { async move { Ok(state.read(|state| state.get_current_epoch().boxed()).await) }.boxed() + })? + .at("da_members", |req, state| { + async move { + // Try to get the epoch from the request. If this fails, error + // as it was probably a mistake + let epoch = req + .opt_integer_param("epoch_number") + .map_err(|_| hotshot_query_service::node::Error::Custom { + message: "Epoch number is required".to_string(), + status: StatusCode::BAD_REQUEST, + })? + .map(EpochNumber::new); + + Ok(state + .read(|state| state.get_da_members(epoch).boxed()) + .await) + } + .boxed() + })? 
+ .at("da_members_current", |_, state| { + async move { + Ok(state + .read(|state| state.get_da_members_current().boxed()) + .await) + } + .boxed() })?; Ok(api) diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index 890080ef3e..bae660784c 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -1,12 +1,16 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use clap::Parser; +use client::SequencerClient; use espresso_types::parse_duration; use ethers::types::Address; +use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType}; +use sequencer::api::data_source::PublicHotShotConfig; use sequencer_utils::{ logging, stake_table::{update_stake_table, PermissionedStakeTableUpdate}, }; use std::{path::PathBuf, time::Duration}; + use url::Url; #[derive(Debug, Clone, Parser)] @@ -78,7 +82,28 @@ struct Options { env = "ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_UPDATE_TOML_PATH", verbatim_doc_comment )] - update_toml_path: PathBuf, + update_toml_path: Option, + /// Flag to update the contract with the initial stake table. + /// + /// This stake table is fetched directly from hotshot config, and is pre-epoch stake table + + #[clap( + long, + short, + env = "ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_INITIAL", + default_value_t = false + )] + initial: bool, + + /// Peer nodes use to fetch missing state + #[clap( + long, + env = "ESPRESSO_SEQUENCER_STATE_PEERS", + value_delimiter = ',', + conflicts_with = "update_toml_path" + )] + pub state_peers: Option>, + #[clap(flatten)] logging: logging::Config, } @@ -88,10 +113,44 @@ async fn main() -> Result<()> { let opts = Options::parse(); opts.logging.init(); - let path = opts.update_toml_path; + let mut update: Option = None; + + if opts.initial { + let peers = opts.state_peers.context("No state peers found")?; + let clients: Vec = peers.into_iter().map(SequencerClient::new).collect(); + + for client in &clients { + tracing::warn!("calling config endpoint of {client:?}"); + + match client.config::().await { + Ok(config) => { + let hotshot = config.into_hotshot_config(); + let st = hotshot.known_nodes_with_stake; + let da_nodes = hotshot.known_da_nodes; + + let new_stakers = st + .into_iter() + .map(|s| PeerConfigKeys { + stake_table_key: s.stake_table_entry.stake_key.clone(), + state_ver_key: s.state_ver_key.clone(), + stake: s.stake_table_entry.stake().as_u64(), + da: da_nodes.contains(&s), + }) + .collect(); - tracing::error!("updating stake table from path: {path:?}"); - let update = PermissionedStakeTableUpdate::from_toml_file(&path)?; + update = Some(PermissionedStakeTableUpdate::new(new_stakers, Vec::new())); + break; + } + Err(e) => { + tracing::warn!("Failed to fetch config from sequencer: {e}"); + } + }; + } + } else { + let path = opts.update_toml_path.context("No update path found")?; + tracing::error!("updating stake table from path: {path:?}"); + update = Some(PermissionedStakeTableUpdate::from_toml_file(&path)?); + }; update_stake_table( opts.rpc_url, @@ -99,7 +158,7 @@ async fn main() -> Result<()> { opts.mnemonic, opts.account_index, opts.contract_address, - update, + update.unwrap(), ) .await?; diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 5f8c6553fd..d79d16ae13 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -23,7 +23,6 @@ use espresso_types::{ use ethers_conv::ToAlloy; use genesis::L1Finalized; use 
proposal_fetcher::ProposalFetcherConfig; -use sequencer_utils::stake_table::PermissionedStakeTableUpdate; use std::sync::Arc; use tokio::select; // Should move `STAKE_TABLE_CAPACITY` in the sequencer repo when we have variate stake table support @@ -496,12 +495,6 @@ pub async fn init_node( network_config.config.epoch_height, ); - // save initial stake table into toml file - // this will be helpful to load it into contract - PermissionedStakeTableUpdate::save_initial_stake_table_from_hotshot_config( - network_config.config.clone(), - )?; - // Initialize the Libp2p network let network = { let p2p_network = Libp2pNetwork::from_config( diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index ca20a8680f..6c7fc4bfb6 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -14,9 +14,7 @@ use ethers::{ }; use hotshot::types::BLSPubKey; use hotshot_contract_adapter::stake_table::{bls_jf_to_sol, NodeInfoJf}; -use hotshot_types::{ - network::PeerConfigKeys, traits::signature_key::StakeTableEntryType, HotShotConfig, -}; +use hotshot_types::network::PeerConfigKeys; use url::Url; use std::{fs, path::Path, sync::Arc, time::Duration}; @@ -132,26 +130,6 @@ impl PermissionedStakeTableUpdate { }) .collect() } - - pub fn save_initial_stake_table_from_hotshot_config( - config: HotShotConfig, - ) -> anyhow::Result<()> { - let committee_members = config.known_nodes_with_stake.clone(); - let known_da_nodes = config.known_da_nodes.clone().clone(); - let members = committee_members - .into_iter() - .map(|m| PeerConfigKeys { - stake_table_key: m.stake_table_entry.public_key(), - state_ver_key: m.state_ver_key.clone(), - stake: m.stake_table_entry.stake().as_u64(), - da: known_da_nodes.contains(&m), - }) - .collect(); - - Self::new(members, vec![]).to_toml_file(Path::new("data/initial_stake_table.toml"))?; - - Ok(()) - } } pub async fn update_stake_table( From 8bf12798737052714898df697647fe5a891671b6 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 24 Feb 2025 20:40:30 +0500 Subject: [PATCH 093/120] fix update binary --- process-compose.yaml | 2 +- sequencer/src/api/data_source.rs | 4 + .../bin/update-permissioned-stake-table.rs | 90 +++++++++---------- 3 files changed, 47 insertions(+), 49 deletions(-) diff --git a/process-compose.yaml b/process-compose.yaml index 7baa6767c4..33c7ab8d87 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -117,7 +117,7 @@ processes: command: update-permissioned-stake-table environment: - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS - - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_UPDATE_TOML_PATH=data/initial_stake_table.toml + - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT depends_on: deploy-prover-contracts: condition: process_completed diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index a3372a754b..b8afaa5b95 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -435,6 +435,10 @@ impl PublicNetworkConfig { public_keys: Vec::new(), }) } + + pub fn hotshot_config(&self) -> PublicHotShotConfig { + self.config.clone() + } } #[cfg(any(test, feature = "testing"))] diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index bae660784c..a5a53f6647 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -4,7 +4,7 @@ use client::SequencerClient; use espresso_types::parse_duration; use 
ethers::types::Address; use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType}; -use sequencer::api::data_source::PublicHotShotConfig; +use sequencer::api::data_source::PublicNetworkConfig; use sequencer_utils::{ logging, stake_table::{update_stake_table, PermissionedStakeTableUpdate}, @@ -83,19 +83,10 @@ struct Options { verbatim_doc_comment )] update_toml_path: Option, - /// Flag to update the contract with the initial stake table. - /// - /// This stake table is fetched directly from hotshot config, and is pre-epoch stake table - - #[clap( - long, - short, - env = "ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_INITIAL", - default_value_t = false - )] - initial: bool, - /// Peer nodes use to fetch missing state + /// Peers for fetching hotshot config + /// used to update the contract with the initial stake table. + /// This stake table is fetched directly from hotshot config, and is pre-epoch stake table #[clap( long, env = "ESPRESSO_SEQUENCER_STATE_PEERS", @@ -115,42 +106,45 @@ async fn main() -> Result<()> { let mut update: Option = None; - if opts.initial { - let peers = opts.state_peers.context("No state peers found")?; - let clients: Vec = peers.into_iter().map(SequencerClient::new).collect(); - - for client in &clients { - tracing::warn!("calling config endpoint of {client:?}"); - - match client.config::().await { - Ok(config) => { - let hotshot = config.into_hotshot_config(); - let st = hotshot.known_nodes_with_stake; - let da_nodes = hotshot.known_da_nodes; - - let new_stakers = st - .into_iter() - .map(|s| PeerConfigKeys { - stake_table_key: s.stake_table_entry.stake_key.clone(), - state_ver_key: s.state_ver_key.clone(), - stake: s.stake_table_entry.stake().as_u64(), - da: da_nodes.contains(&s), - }) - .collect(); - - update = Some(PermissionedStakeTableUpdate::new(new_stakers, Vec::new())); - break; - } - Err(e) => { - tracing::warn!("Failed to fetch config from sequencer: {e}"); - } - }; + match opts.update_toml_path { + Some(path) => { + tracing::error!("updating stake table from path: {path:?}"); + update = Some(PermissionedStakeTableUpdate::from_toml_file(&path)?); + } + None => { + let peers = opts.state_peers.context("No state peers found")?; + let clients: Vec = + peers.into_iter().map(SequencerClient::new).collect(); + + for client in &clients { + tracing::warn!("calling config endpoint of {client:?}"); + + match client.config::().await { + Ok(config) => { + let hotshot = config.hotshot_config().into_hotshot_config(); + let st = hotshot.known_nodes_with_stake; + let da_nodes = hotshot.known_da_nodes; + + let new_stakers = st + .into_iter() + .map(|s| PeerConfigKeys { + stake_table_key: s.stake_table_entry.stake_key.clone(), + state_ver_key: s.state_ver_key.clone(), + stake: s.stake_table_entry.stake().as_u64(), + da: da_nodes.contains(&s), + }) + .collect(); + + update = Some(PermissionedStakeTableUpdate::new(new_stakers, Vec::new())); + break; + } + Err(e) => { + tracing::warn!("Failed to fetch config from sequencer: {e}"); + } + }; + } } - } else { - let path = opts.update_toml_path.context("No update path found")?; - tracing::error!("updating stake table from path: {path:?}"); - update = Some(PermissionedStakeTableUpdate::from_toml_file(&path)?); - }; + } update_stake_table( opts.rpc_url, From 54c89d7b02683c7d842e2d9dff4cdea94e6615fb Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 01:07:51 +0500 Subject: [PATCH 094/120] fix stake table update test --- client/src/lib.rs | 2 +- tests/common/mod.rs | 127 
++++++++++++++++++++++++--------------- tests/smoke.rs | 1 + utils/src/stake_table.rs | 14 +++-- 4 files changed, 90 insertions(+), 54 deletions(-) diff --git a/client/src/lib.rs b/client/src/lib.rs index 703fe639e4..f941d96be7 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -148,7 +148,7 @@ impl SequencerClient { pub async fn config(&self) -> anyhow::Result { self.0 - .get::(&format!("config/hotshot")) + .get::("config/hotshot") .send() .await .context("getting hotshot config") diff --git a/tests/common/mod.rs b/tests/common/mod.rs index cea9bb2904..3adff49579 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -2,8 +2,8 @@ use anyhow::{anyhow, Context, Result}; use client::SequencerClient; use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; use ethers::prelude::*; -use futures::future::join_all; -use std::path::Path; +use futures::future::{join_all, BoxFuture}; +use futures::FutureExt; use std::{fmt, str::FromStr, time::Duration}; use surf_disco::Url; use tokio::time::{sleep, timeout}; @@ -292,58 +292,91 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() let l1_port = var("ESPRESSO_SEQUENCER_L1_PORT")?; let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?; let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?; - let initial_stake_table_path = var("ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH")?; - - let permissioned_stake_table = - PermissionedStakeTableUpdate::from_toml_file(Path::new(&initial_stake_table_path))?; - - // initial stake table has 5 new stakers + let client = clients[0].clone(); + // currently stake table update does not support DA node member changes - let new_stakers = permissioned_stake_table.new_stakers; - //lets remove one - let staker_removed = new_stakers[0].clone(); + let stake_table = client.stake_table(1).await?; + let da_members = client.da_members(1).await?; - let st_with_one_removed = PermissionedStakeTableUpdate::new( + // filtering out DA nodes + let stakers: Vec<_> = stake_table + .into_iter() + .filter(|x| !da_members.contains(x)) + .collect(); + + let assert_change = + move |u: PermissionedStakeTableUpdate| -> BoxFuture<'static, anyhow::Result<()>> { + async move { + let epoch_before_update = client.current_epoch().await?.context("curr epoch")?; + tracing::warn!("current_epoch={epoch_before_update:?}"); + + let current_stake_table = client.stake_table(epoch_before_update).await?; + + let removed = u.stakers_to_remove.len(); + let added = u.new_stakers.len(); + + update_stake_table( + format!("http://localhost:{l1_port}").parse()?, + Duration::from_secs(7), + "test test test test test test test test test test test junk".to_string(), + account_index.parse()?, + contract_address.parse()?, + u.clone(), + ) + .await?; + + loop { + sleep(Duration::from_secs(10)).await; + let epoch = client.current_epoch().await?.context("curr epoch")?; + tracing::info!("current_epoch={epoch:?}"); + if epoch > epoch_before_update + 2 { + let stake_table = client.stake_table(epoch).await?; + tracing::info!("stake_table={stake_table:?}"); + assert_eq!( + stake_table.len(), + current_stake_table.len() + added - removed + ); + + for added in &u.new_stakers { + assert!( + stake_table + .iter() + .any(|st| st.stake_key == added.stake_table_key), + "staker {} not found", + added.stake_table_key + ); + } + + for removed in &u.stakers_to_remove { + assert!( + stake_table + .iter() + .all(|st| st.stake_key != removed.stake_table_key), + "staker {} found", + removed.stake_table_key + ); + } + + 
break; + } + } + + anyhow::Result::<_>::Ok(()) + } + .boxed() + }; + let node = stakers[0].clone(); + let one_removed = PermissionedStakeTableUpdate::new( vec![], vec![StakerIdentity { - stake_table_key: staker_removed.stake_table_key.clone(), + stake_table_key: node.stake_key.clone(), }], ); - let client = clients[0].clone(); - let epoch_before_update = client.current_epoch().await?.context("curr epoch")?; - tracing::warn!("current_epoch={epoch_before_update:?}"); - update_stake_table( - format!("http://localhost:{l1_port}").parse()?, - Duration::from_secs(7), - "test test test test test test test test test test test junk".to_string(), - account_index.parse()?, - contract_address.parse()?, - st_with_one_removed, - ) - .await?; - - loop { - sleep(Duration::from_secs(10)).await; - let epoch = clients[0].current_epoch().await?.context("curr epoch")?; - tracing::info!("current_epoch={epoch:?}"); - if epoch > epoch_before_update + 6 { - let stake_table = client.stake_table(epoch).await?; - tracing::info!("stake_table={stake_table:?}"); - assert_eq!(stake_table.len(), 4); - - assert!( - stake_table - .iter() - .all(|st| st.stake_key != staker_removed.stake_table_key), - "Entry for {} already exists in the stake table", - staker_removed.stake_table_key - ); - - break; - } - } - // TODO: randomize this test + // remove one node + assert_change(one_removed) + .await + .expect("failed to remove one node"); Ok(()) } diff --git a/tests/smoke.rs b/tests/smoke.rs index 7177a90151..0a0a23cda3 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -89,6 +89,7 @@ async fn test_smoke() -> Result<()> { tracing::info!("epoch before stake table update {epoch:?}"); + // Check if the epoch number is greater than Epoch::genesis(), i.e. 1 if epoch > 1 { tracing::info!("testing stake table update"); test_stake_table_update(testing.sequencer_clients).await?;
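The `epoch > 1` gate above leans on `Epoch::genesis()` being 1 in these patches. If a test ever needs to block until epochs are live rather than branch on it, a helper along these lines would do. This is a hypothetical sketch, not part of the diff; it assumes the `SequencerClient::current_epoch` accessor used in these tests returns `anyhow::Result<Option<u64>>`:

// Hypothetical helper: poll until the reported epoch is strictly past
// genesis (1) before exercising stake-table updates.
async fn wait_past_genesis_epoch(client: &SequencerClient) -> anyhow::Result<u64> {
    loop {
        if let Some(epoch) = client.current_epoch().await? {
            if epoch > 1 {
                return Ok(epoch);
            }
        }
        tokio::time::sleep(std::time::Duration::from_secs(5)).await;
    }
}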
diff --git a/utils/src/stake_table.rs b/utils/src/stake_table.rs index 6c7fc4bfb6..6cbb96c0d0 100644 --- a/utils/src/stake_table.rs +++ b/utils/src/stake_table.rs @@ -3,7 +3,7 @@ /// The initial stake table is passed to the permissioned stake table contract /// on deployment. use contract_bindings_ethers::permissioned_stake_table::{ - G2Point, NodeInfo, PermissionedStakeTable, + G2Point, NodeInfo, PermissionedStakeTable, PermissionedStakeTableErrors, }; use derive_more::derive::From; use ethers::{ @@ -19,6 +19,8 @@ use url::Url; use std::{fs, path::Path, sync::Arc, time::Duration}; +use crate::contract_send; + /// A stake table config stored in a file #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[serde(bound(deserialize = ""))] @@ -158,11 +160,11 @@ pub async fn update_stake_table( anyhow::bail!("No changes to update in the stake table"); } - let tx_receipt = contract - .update(update.stakers_to_remove(), update.new_stakers()) - .send() - .await? - .await?; + let (tx_receipt, _) = contract_send::<_, _, PermissionedStakeTableErrors>( + &contract.update(update.stakers_to_remove(), update.new_stakers()), + ) + .await?; + tracing::info!("Transaction receipt: {:?}", tx_receipt); Ok(()) } From 9bd10ef5f084f49687bb17371f75f470cefe432b Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 01:17:48 +0500 Subject: [PATCH 095/120] clippy --- sequencer/src/bin/update-permissioned-stake-table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index a5a53f6647..37adadba46 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -128,7 +128,7 @@ async fn main() -> Result<()> { let new_stakers = st .into_iter() .map(|s| PeerConfigKeys { - stake_table_key: s.stake_table_entry.stake_key.clone(), + stake_table_key: s.stake_table_entry.stake_key, state_ver_key: s.state_ver_key.clone(), stake: s.stake_table_entry.stake().as_u64(), da: da_nodes.contains(&s), From 51ec2436818a5a66f06731fc45aa7f3485b3ce1e Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 02:19:30 +0500 Subject: [PATCH 096/120] remove l1_provider, mnemonic default values --- process-compose.yaml | 2 ++ .../src/bin/update-permissioned-stake-table.rs | 14 ++------------ tests/common/mod.rs | 2 +- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/process-compose.yaml b/process-compose.yaml index 33c7ab8d87..6a704feffd 100644 --- a/process-compose.yaml +++ b/process-compose.yaml @@ -118,6 +118,8 @@ processes: environment: - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS - ESPRESSO_SEQUENCER_STATE_PEERS=http://localhost:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_ETH_MNEMONIC + - ESPRESSO_SEQUENCER_L1_PROVIDER depends_on: deploy-prover-contracts: condition: process_completed diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index 37adadba46..d0a3bac1c1 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -16,12 +16,7 @@ use url::Url; #[derive(Debug, Clone, Parser)] struct Options { /// RPC URL for the L1 provider. - #[clap( - short, - long, - env = "ESPRESSO_SEQUENCER_L1_PROVIDER", - default_value = "http://localhost:8545" - )] + #[clap(short, long, env = "ESPRESSO_SEQUENCER_L1_PROVIDER")] rpc_url: Url, /// Request rate when polling L1. @@ -37,12 +32,7 @@ struct Options { /// /// This wallet is used to deploy the contracts, so the account indicated by ACCOUNT_INDEX must /// be funded with ETH. -#[clap( - long, - name = "MNEMONIC", - env = "ESPRESSO_SEQUENCER_ETH_MNEMONIC", - default_value = "test test test test test test test test test test test junk" - )] + #[clap(long, name = "MNEMONIC", env = "ESPRESSO_SEQUENCER_ETH_MNEMONIC")] mnemonic: String, /// Account index in the L1 wallet generated by MNEMONIC to use when deploying the contracts.
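Pulling the last few patches together, the updater now has two mutually exclusive inputs: an explicit TOML update file, or the public config endpoint of a state peer, from which it derives the pre-epoch stake table. A condensed sketch of that control flow (not the literal binary; `SequencerClient`, `PublicNetworkConfig`, `PeerConfigKeys`, and `PermissionedStakeTableUpdate` are the types used in the diffs, and `stake_key` is assumed to be `Copy` per the clippy fix above):

// Condensed sketch of the updater's two modes.
async fn resolve_update(
    toml_path: Option<std::path::PathBuf>,
    peers: Vec<url::Url>,
) -> anyhow::Result<PermissionedStakeTableUpdate> {
    use anyhow::Context;
    if let Some(path) = toml_path {
        // Explicit mode: the update is spelled out in a TOML file.
        return PermissionedStakeTableUpdate::from_toml_file(&path);
    }
    // Bootstrap mode: derive the pre-epoch stake table from a peer's public config.
    for peer in peers {
        let client = SequencerClient::new(peer);
        let Ok(config) = client.config::<PublicNetworkConfig>().await else {
            continue; // try the next peer
        };
        let hotshot = config.hotshot_config().into_hotshot_config();
        let da_nodes = hotshot.known_da_nodes;
        let stakers = hotshot
            .known_nodes_with_stake
            .into_iter()
            .map(|s| PeerConfigKeys {
                stake_table_key: s.stake_table_entry.stake_key,
                state_ver_key: s.state_ver_key.clone(),
                stake: s.stake_table_entry.stake().as_u64(),
                da: da_nodes.contains(&s),
            })
            .collect();
        return Ok(PermissionedStakeTableUpdate::new(stakers, Vec::new()));
    }
    None.context("no state peer returned a config")
}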
diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 3adff49579..5bc18547aa 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -369,7 +369,7 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() let one_removed = PermissionedStakeTableUpdate::new( vec![], vec![StakerIdentity { - stake_table_key: node.stake_key.clone(), + stake_table_key: node.stake_key, }], ); From a0614377a33f06a209b2e35754f347855c4a2b3a Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 17:49:40 +0500 Subject: [PATCH 097/120] use config endpoint for stake table updates --- .github/workflows/test.yml | 2 +- Cargo.lock | 2 ++ tests/Cargo.toml | 2 ++ tests/common/mod.rs | 74 ++++++++++++++++++++++++++++---------- tests/smoke.rs | 1 - 5 files changed, 61 insertions(+), 20 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 8c93588ff5..50eedd1211 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -193,7 +193,7 @@ jobs: needs: [build-test-bins, build-test-artifacts-postgres] strategy: matrix: - version: [03] + version: [02, 03, 99] include: - version: 02 compose: "-f process-compose.yaml -D" diff --git a/Cargo.lock b/Cargo.lock index 9afec16fa3..62bce33a43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11161,8 +11161,10 @@ dependencies = [ "espresso-types", "ethers", "futures", + "hotshot-types", "reqwest 0.12.12", "sequencer-utils", + "serde", "surf-disco", "tokio", "tracing", diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 69e0e5bef8..e33048117b 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,3 +22,5 @@ surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } +hotshot-types = { workspace = true } +serde = { workspace = true } \ No newline at end of file diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 5bc18547aa..99224034f3 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,10 +1,14 @@ use anyhow::{anyhow, Context, Result}; use client::SequencerClient; -use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion}; +use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion, PubKey}; use ethers::prelude::*; use futures::future::{join_all, BoxFuture}; use futures::FutureExt; +use hotshot_types::network::PeerConfigKeys; +use hotshot_types::traits::signature_key::StakeTableEntryType; +use hotshot_types::PeerConfig; use std::{fmt, str::FromStr, time::Duration}; +use surf_disco::http::convert::Deserialize; use surf_disco::Url; use tokio::time::{sleep, timeout}; use vbs::version::StaticVersionType; @@ -284,28 +288,22 @@ async fn wait_for_service(url: Url, interval: u64, timeout_duration: u64) -> Res .map_err(|e| anyhow!("Wait for service, timeout: ({}) {}", url, e))? 
} -pub async fn test_stake_table_update(clients: Vec) -> Result<()> { - /* - EPOCH V3 - */ +/* + EPOCH V3 +*/ +pub async fn test_stake_table_update(clients: Vec) -> Result<()> { let l1_port = var("ESPRESSO_SEQUENCER_L1_PORT")?; let account_index = var("ESPRESSO_DEPLOYER_ACCOUNT_INDEX")?; let contract_address = var("ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS")?; let client = clients[0].clone(); - // currently stake table update does not support DA node member changes - - let stake_table = client.stake_table(1).await?; - let da_members = client.da_members(1).await?; - - // filtering out DA nodes - let stakers: Vec<_> = stake_table - .into_iter() - .filter(|x| !da_members.contains(x)) - .collect(); let assert_change = - move |u: PermissionedStakeTableUpdate| -> BoxFuture<'static, anyhow::Result<()>> { + |u: PermissionedStakeTableUpdate| -> BoxFuture<'static, anyhow::Result<()>> { + let client = client.clone(); + let l1_port = l1_port.clone(); + let account_index = account_index.clone(); + let contract_address = contract_address.clone(); async move { let epoch_before_update = client.current_epoch().await?.context("curr epoch")?; tracing::warn!("current_epoch={epoch_before_update:?}"); @@ -365,11 +363,27 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() } .boxed() }; - let node = stakers[0].clone(); + + let config = client.config::().await?.config; + + // currently stake table update does not support DA node member changes + let stake_table = config.known_nodes_with_stake; + let da_members = config.known_da_nodes; + + // filtering out DA nodes + let non_da_stakers: Vec<_> = stake_table + .into_iter() + .filter(|x| !da_members.contains(x)) + .collect(); + + let node = non_da_stakers + .get(0) + .context("failed to get non DA node")? 
+ .clone(); let one_removed = PermissionedStakeTableUpdate::new( vec![], vec![StakerIdentity { - stake_table_key: node.stake_key, + stake_table_key: node.stake_table_entry.stake_key.clone(), }], ); @@ -378,5 +392,29 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() .await .expect("failed to remove one node"); + // add back the removed node + let added = PermissionedStakeTableUpdate::new( + vec![PeerConfigKeys { + stake_table_key: *node.stake_table_entry.key(), + state_ver_key: node.state_ver_key, + stake: node.stake_table_entry.stake().as_u64(), + da: false, + }], + vec![], + ); + + assert_change(added).await.expect("failed to add a node"); + Ok(()) } + +#[derive(Debug, Deserialize)] +struct PublicHotShotConfig { + known_nodes_with_stake: Vec>, + known_da_nodes: Vec>, +} + +#[derive(Debug, Deserialize)] +struct PublicNetworkConfig { + config: PublicHotShotConfig, +} diff --git a/tests/smoke.rs b/tests/smoke.rs index 0a0a23cda3..e009fb3ba3 100644 --- a/tests/smoke.rs +++ b/tests/smoke.rs @@ -32,7 +32,6 @@ async fn test_smoke() -> Result<()> { let mut state_retries = 0; let mut txn_retries = 0; while (sub.next().await).is_some() { - dbg!("next"); let new = testing.test_state().await; println!("New State:{}", new); From fbbe05fae5a8f4dff2a3f6e202008c1422b80090 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 18:30:48 +0500 Subject: [PATCH 098/120] lint --- tests/common/mod.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 99224034f3..f0abe66e0e 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -376,14 +376,11 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() .filter(|x| !da_members.contains(x)) .collect(); - let node = non_da_stakers - .get(0) - .context("failed to get non DA node")? 
- .clone(); + let node = non_da_stakers.first().context("no non da staker found")?; let one_removed = PermissionedStakeTableUpdate::new( vec![], vec![StakerIdentity { - stake_table_key: node.stake_table_entry.stake_key.clone(), + stake_table_key: node.stake_table_entry.stake_key, }], ); @@ -396,7 +393,7 @@ pub async fn test_stake_table_update(clients: Vec) -> Result<() let added = PermissionedStakeTableUpdate::new( vec![PeerConfigKeys { stake_table_key: *node.stake_table_entry.key(), - state_ver_key: node.state_ver_key, + state_ver_key: node.state_ver_key.clone(), stake: node.stake_table_entry.stake().as_u64(), da: false, }], From 3e645910821771bcb132a03b644a319efbaa1ec7 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 18:52:38 +0500 Subject: [PATCH 099/120] use fee,pos,marketplace to build binaries for integration-test --- .github/workflows/test.yml | 4 ++-- tests/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 50eedd1211..3737805559 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -94,8 +94,8 @@ jobs: - name: Build Bins run: | - cargo build --locked --profile test --bins - cargo build --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target + cargo build --features "fee,pos,marketplace" --locked --profile test --bins + cargo build --features "fee,pos,marketplace" --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target timeout-minutes: 60 - name: Upload archive to workflow diff --git a/tests/Cargo.toml b/tests/Cargo.toml index e33048117b..551f6f1f36 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -16,11 +16,11 @@ dotenvy = { workspace = true } espresso-types = { path = "../types", features = ["testing"] } ethers = { workspace = true } futures = { workspace = true } +hotshot-types = { workspace = true } reqwest = { workspace = true, features = ["json"] } sequencer-utils = { path = "../utils" } +serde = { workspace = true } surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } -hotshot-types = { workspace = true } -serde = { workspace = true } \ No newline at end of file From 6ee04ba7213c3ba3aaf5c3741ca74864d0a56f14 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 19:00:49 +0500 Subject: [PATCH 100/120] fix docker-compose.yaml --- docker-compose.yaml | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 22e29f0e34..c9309f5fd7 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -15,11 +15,10 @@ services: - "./geth-config/genesis-default.json:/genesis.json" - "./geth-config/test-jwt-secret.txt:/config/test-jwt-secret.txt" - deploy-sequencer-contracts: + deploy-fee-contract: image: ghcr.io/espressosystems/espresso-sequencer/deploy:main - command: deploy --only fee-contract,permissioned-stake-table + command: deploy --only fee-contract environment: - - ESPRESSO_SEQUENCER_INITIAL_PERMISSIONED_STAKE_TABLE_PATH=/data/initial_stake_table.toml - ESPRESSO_SEQUENCER_ETH_MULTISIG_ADDRESS - ESPRESSO_SEQUENCER_L1_PROVIDER - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL @@ -27,12 +26,24 @@ services: - RUST_LOG - RUST_LOG_FORMAT - ASYNC_STD_THREAD_COUNT - volumes: - - "./data/initial_stake_table.toml:/data/initial_stake_table.toml" depends_on: demo-l1-network: condition: service_healthy + deploy-stake-table-contract: + image: 
ghcr.io/espressosystems/espresso-sequencer/deploy:main + command: deploy --only permissioned-stake-table + environment: + - ESPRESSO_SEQUENCER_L1_PROVIDER + - ESPRESSO_SEQUENCER_L1_POLLING_INTERVAL + - ESPRESSO_DEPLOYER_ACCOUNT_INDEX + - RUST_LOG + - RUST_LOG_FORMAT + - ASYNC_STD_THREAD_COUNT + depends_on: + deploy-fee-contract: + condition: service_completed_successfully + deploy-prover-contracts: image: ghcr.io/espressosystems/espresso-sequencer/deploy:main command: deploy --use-mock-contract --only light-client @@ -54,7 +65,9 @@ services: sequencer0: condition: service_healthy # Make sure this doesn't start until the other contracts have been deployed, since we use the same mnemonic. - deploy-sequencer-contracts: + deploy-stake-table-contract: + condition: service_completed_successfully + deploy-fee-contract: condition: service_completed_successfully fund-builder: @@ -220,6 +233,19 @@ services: deploy-prover-contracts: condition: service_completed_successfully + + update-permissioned-stake-table: + image: ghcr.io/espressosystems/espresso-sequencer/update-permissioned-stake-table:main + environment: + - ESPRESSO_SEQUENCER_PERMISSIONED_STAKE_TABLE_ADDRESS + - ESPRESSO_SEQUENCER_STATE_PEERS=http://sequencer:$ESPRESSO_SEQUENCER_API_PORT + - ESPRESSO_SEQUENCER_ETH_MNEMONIC + - ESPRESSO_SEQUENCER_L1_PROVIDER + depends_on: + deploy-prover-contracts: + condition: service_completed_successfully + sequencer0: + condition: service_healthy sequencer0: image: ghcr.io/espressosystems/espresso-sequencer/sequencer:main ports: From a78a33dbb2fd559b03129e89d7323b54a68de340 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Tue, 25 Feb 2025 19:54:31 +0500 Subject: [PATCH 101/120] add marketplace feature to sqlite binary --- sequencer-sqlite/Cargo.toml | 1 + tests/upgrades.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/sequencer-sqlite/Cargo.toml b/sequencer-sqlite/Cargo.toml index 189225244f..8ada2b8dde 100644 --- a/sequencer-sqlite/Cargo.toml +++ b/sequencer-sqlite/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [features] fee = ["sequencer/fee"] pos = ["sequencer/pos"] +marketplace = ["sequencer/marketplace"] default = ["embedded-db", "pos"] sqlite-unbundled = ["sequencer/sqlite-unbundled"] embedded-db = ["sequencer/embedded-db"] diff --git a/tests/upgrades.rs b/tests/upgrades.rs index d074ee35ac..9c6ebafb9d 100644 --- a/tests/upgrades.rs +++ b/tests/upgrades.rs @@ -26,15 +26,19 @@ async fn test_upgrade() -> Result<()> { println!("Initial State:{}", initial); let clients = testing.sequencer_clients; - + let client = clients[0].clone(); let height = test_header_version(clients.clone(), base, upgrade).await?; // check that atleast 50 blocks are produced after the upgrade test_blocks_production(clients.clone(), height, 50).await?; if upgrade == EpochVersion::version() { - test_stake_table_update(clients).await?; + test_stake_table_update(clients.clone()).await?; } + let height = client.get_height().await?; + // check that atleast 50 blocks are produced after the stake table updates + test_blocks_production(clients.clone(), height, 50).await?; + // TODO assert transactions are incrementing Ok(()) } From 2fc136f382bee157c1638cf0fe62455ce4ec4337 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 26 Feb 2025 13:56:45 +0500 Subject: [PATCH 102/120] insert randomized committee --- docker-compose.yaml | 12 ++++++------ types/src/v0/impls/stake_table.rs | 14 +++++++++++++- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 
c9309f5fd7..2e6a892065 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -85,7 +85,7 @@ services: - RUST_LOG - RUST_LOG_FORMAT depends_on: - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer1: condition: service_healthy @@ -304,7 +304,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer1: @@ -363,7 +363,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer2: @@ -414,7 +414,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer3: @@ -466,7 +466,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully sequencer4: @@ -517,7 +517,7 @@ services: condition: service_healthy marshal-0: condition: service_healthy - deploy-sequencer-contracts: + deploy-stake-table-contract: condition: service_completed_successfully submit-transactions-public: diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs index 0bb290947d..49d164badb 100644 --- a/types/src/v0/impls/stake_table.rs +++ b/types/src/v0/impls/stake_table.rs @@ -189,6 +189,8 @@ impl EpochCommittees { .filter(|entry| entry.stake() > U256::zero()) .collect(); + let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]); + let committee = Committee { eligible_leaders, stake_table, @@ -200,6 +202,12 @@ impl EpochCommittees { self.state.insert(epoch, committee.clone()); self.state.insert(epoch + 1, committee.clone()); self.state.insert(epoch + 2, committee.clone()); + self.randomized_committees + .insert(epoch, randomized_committee.clone()); + self.randomized_committees + .insert(epoch + 1, randomized_committee.clone()); + self.randomized_committees + .insert(epoch + 2, randomized_committee.clone()); committee } @@ -245,6 +253,7 @@ impl EpochCommittees { .iter() .map(|entry| (PubKey::public_key(entry), entry.clone())) .collect(); + let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]); let members = Committee { eligible_leaders, @@ -254,10 +263,13 @@ indexed_da_members, }; + let mut randomized_committees = BTreeMap::new(); + // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); for epoch in Epoch::genesis().u64()..=50 { map.insert(Epoch::new(epoch), members.clone()); + randomized_committees.insert(Epoch::new(epoch), randomized_committee.clone()); } Self { @@ -267,7 +279,7 @@ impl EpochCommittees { l1_client: instance_state.l1_client.clone(), chain_config: instance_state.chain_config, peers: instance_state.peers.clone(), - randomized_committees: BTreeMap::new(), + randomized_committees, } }
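Because the plain committee map and the randomized CDF map have to stay in lockstep across the three seeded epochs, the inserts above can also be expressed as a single loop. This is a behavior-preserving sketch against the same fields, not a further patch:

// Sketch: seed both maps for epoch..=epoch + 2 in one pass, so the
// epoch offsets cannot drift apart between the two maps.
for e in [epoch, epoch + 1, epoch + 2] {
    self.state.insert(e, committee.clone());
    self.randomized_committees
        .insert(e, randomized_committee.clone());
}

From 646b4ad32d78e1c8f3dd8b2c26557959af5a7e7c Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Wed, 26 Feb 2025 14:21:07 +0500 Subject: [PATCH 103/120] move public hotshot config to types crate --- Cargo.lock | 2 + client/Cargo.toml | 1 + client/src/lib.rs | 7 +- sequencer/src/api.rs | 16 +- sequencer/src/api/data_source.rs | 258 +---------------- .../bin/update-permissioned-stake-table.rs | 4 +-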
sequencer/src/catchup.rs | 7 +- tests/common/mod.rs | 22 +- types/Cargo.toml | 1 + types/src/v0/mod.rs | 3 + types/src/v0/v0_1/config.rs | 262 ++++++++++++++++++ types/src/v0/v0_1/mod.rs | 2 + types/src/v0/v0_2/mod.rs | 9 +- types/src/v0/v0_3/mod.rs | 5 +- types/src/v0/v0_99/mod.rs | 5 +- 15 files changed, 304 insertions(+), 300 deletions(-) create mode 100644 types/src/v0/v0_1/config.rs diff --git a/Cargo.lock b/Cargo.lock index e662a98789..44f4170e75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2497,6 +2497,7 @@ dependencies = [ "futures", "hotshot-types", "jf-merkle-tree", + "serde_json", "surf-disco", "tokio", "tracing", @@ -3754,6 +3755,7 @@ dependencies = [ "tracing", "url", "vbs", + "vec1", ] [[package]] diff --git a/client/Cargo.toml b/client/Cargo.toml index 9fe84cf055..e4d6d51ab5 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -16,3 +16,4 @@ surf-disco = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } vbs = { workspace = true } +serde_json = { workspace = true } \ No newline at end of file diff --git a/client/src/lib.rs b/client/src/lib.rs index f941d96be7..2370201787 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey}; +use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey, PublicNetworkConfig}; use ethers::types::Address; use futures::{stream::BoxStream, StreamExt}; use hotshot_types::stake_table::StakeTableEntry; @@ -10,7 +10,6 @@ use jf_merkle_tree::{ use std::time::Duration; use surf_disco::{ error::ClientError, - http::convert::DeserializeOwned, socket::{Connection, Unsupported}, Url, }; @@ -146,9 +145,9 @@ impl SequencerClient { .context("getting da stake table") } - pub async fn config(&self) -> anyhow::Result { + pub async fn config(&self) -> anyhow::Result { self.0 - .get::("config/hotshot") + .get::("config/hotshot") .send() .await .context("getting hotshot config") diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index 4c26e3a660..2c2a4fa246 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -7,8 +7,8 @@ use data_source::{CatchupDataSource, StakeTableDataSource, SubmitDataSource}; use derivative::Derivative; use espresso_types::{ retain_accounts, v0::traits::SequencerPersistence, v0_99::ChainConfig, AccountQueryData, - BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction, - ValidatedState, + BlockMerkleTree, FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, + PublicNetworkConfig, Transaction, ValidatedState, }; use futures::{ future::{BoxFuture, Future, FutureExt}, @@ -35,9 +35,7 @@ use jf_merkle_tree::MerkleTreeScheme; use std::pin::Pin; use std::sync::Arc; -use self::data_source::{ - HotShotConfigDataSource, NodeStateDataSource, PublicNetworkConfig, StateSignatureDataSource, -}; +use self::data_source::{HotShotConfigDataSource, NodeStateDataSource, StateSignatureDataSource}; use crate::{ catchup::CatchupStorage, context::Consensus, state_signature::StateSigner, SeqTypes, SequencerApiVersion, SequencerContext, @@ -1612,10 +1610,7 @@ mod test { use tokio::time::sleep; use espresso_types::{ - traits::NullEventConsumer, - v0_1::{UpgradeMode, ViewBasedUpgrade}, - BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions, - SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState, + traits::NullEventConsumer, v0_1::{UpgradeMode, ViewBasedUpgrade}, BackoffParams, 
FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions, PublicHotShotConfig, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState }; use ethers::utils::Anvil; use futures::{ @@ -1645,8 +1640,7 @@ mod test { use vbs::version::{StaticVersion, StaticVersionType, Version}; use self::{ - data_source::{testing::TestableSequencerDataSource, PublicHotShotConfig}, - options::HotshotEvents, + data_source::testing::TestableSequencerDataSource, options::HotshotEvents, sql::DataSource as SqlDataSource, }; use super::*; diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs index b8afaa5b95..fc4f8784ab 100644 --- a/sequencer/src/api/data_source.rs +++ b/sequencer/src/api/data_source.rs @@ -1,12 +1,11 @@ -use std::{num::NonZeroUsize, time::Duration}; - use anyhow::Context; use async_trait::async_trait; use committable::Commitment; use espresso_types::{ v0::traits::{PersistenceOptions, SequencerPersistence}, v0_99::ChainConfig, - FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction, + FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, PublicNetworkConfig, + Transaction, }; use futures::future::Future; use hotshot_query_service::{ @@ -16,21 +15,15 @@ use hotshot_query_service::{ node::NodeDataSource, status::StatusDataSource, }; +use hotshot_types::traits::node_implementation::NodeType; use hotshot_types::{ data::ViewNumber, light_client::StateSignatureRequestBody, - network::NetworkConfig, stake_table::StakeTableEntry, traits::{network::ConnectedNetwork, node_implementation::Versions}, - HotShotConfig, PeerConfig, ValidatorConfig, -}; -use hotshot_types::{ - network::{BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig}, - traits::node_implementation::NodeType, }; -use serde::{Deserialize, Serialize}; + use tide_disco::Url; -use vec1::Vec1; use super::{ fs, @@ -198,249 +191,6 @@ pub(crate) trait CatchupDataSource: Sync { ) -> impl Send + Future>; } -/// This struct defines the public Hotshot validator configuration. -/// Private key and state key pairs are excluded for security reasons. - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct PublicValidatorConfig { - public_key: PubKey, - stake_value: u64, - is_da: bool, - private_key: String, - state_public_key: String, - state_key_pair: String, -} - -impl From> for PublicValidatorConfig { - fn from(v: ValidatorConfig) -> Self { - let ValidatorConfig:: { - public_key, - private_key: _, - stake_value, - state_key_pair, - is_da, - } = v; - - let state_public_key = state_key_pair.ver_key(); - - Self { - public_key, - stake_value, - is_da, - state_public_key: state_public_key.to_string(), - private_key: "*****".into(), - state_key_pair: "*****".into(), - } - } -} - -/// This struct defines the public Hotshot configuration parameters. -/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters. -/// Hotshot config has sensitive information like private keys and such fields are excluded from this struct. 
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct PublicHotShotConfig {
-    start_threshold: (u64, u64),
-    num_nodes_with_stake: NonZeroUsize,
-    known_nodes_with_stake: Vec<PeerConfig<PubKey>>,
-    known_da_nodes: Vec<PeerConfig<PubKey>>,
-    da_staked_committee_size: usize,
-    fixed_leader_for_gpuvid: usize,
-    next_view_timeout: u64,
-    view_sync_timeout: Duration,
-    num_bootstrap: usize,
-    builder_timeout: Duration,
-    data_request_delay: Duration,
-    builder_urls: Vec1<Url>,
-    start_proposing_view: u64,
-    stop_proposing_view: u64,
-    start_voting_view: u64,
-    stop_voting_view: u64,
-    start_proposing_time: u64,
-    stop_proposing_time: u64,
-    start_voting_time: u64,
-    stop_voting_time: u64,
-    epoch_height: u64,
-}
-
-impl From<HotShotConfig<PubKey>> for PublicHotShotConfig {
-    fn from(v: HotShotConfig<PubKey>) -> Self {
-        // Destructure all fields from HotShotConfig to return an error
-        // if new fields are added to HotShotConfig. This makes sure that we handle
-        // all fields appropriately and do not miss any updates.
-        let HotShotConfig::<PubKey> {
-            start_threshold,
-            num_nodes_with_stake,
-            known_nodes_with_stake,
-            known_da_nodes,
-            da_staked_committee_size,
-            fixed_leader_for_gpuvid,
-            next_view_timeout,
-            view_sync_timeout,
-            num_bootstrap,
-            builder_timeout,
-            data_request_delay,
-            builder_urls,
-            start_proposing_view,
-            stop_proposing_view,
-            start_voting_view,
-            stop_voting_view,
-            start_proposing_time,
-            stop_proposing_time,
-            start_voting_time,
-            stop_voting_time,
-            epoch_height,
-        } = v;
-
-        Self {
-            start_threshold,
-            num_nodes_with_stake,
-            known_nodes_with_stake,
-            known_da_nodes,
-            da_staked_committee_size,
-            fixed_leader_for_gpuvid,
-            next_view_timeout,
-            view_sync_timeout,
-            num_bootstrap,
-            builder_timeout,
-            data_request_delay,
-            builder_urls,
-            start_proposing_view,
-            stop_proposing_view,
-            start_voting_view,
-            stop_voting_view,
-            start_proposing_time,
-            stop_proposing_time,
-            start_voting_time,
-            stop_voting_time,
-            epoch_height,
-        }
-    }
-}
-
-impl PublicHotShotConfig {
-    pub fn into_hotshot_config(self) -> HotShotConfig<PubKey> {
-        HotShotConfig {
-            start_threshold: self.start_threshold,
-            num_nodes_with_stake: self.num_nodes_with_stake,
-            known_nodes_with_stake: self.known_nodes_with_stake,
-            known_da_nodes: self.known_da_nodes,
-            da_staked_committee_size: self.da_staked_committee_size,
-            fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid,
-            next_view_timeout: self.next_view_timeout,
-            view_sync_timeout: self.view_sync_timeout,
-            num_bootstrap: self.num_bootstrap,
-            builder_timeout: self.builder_timeout,
-            data_request_delay: self.data_request_delay,
-            builder_urls: self.builder_urls,
-            start_proposing_view: self.start_proposing_view,
-            stop_proposing_view: self.stop_proposing_view,
-            start_voting_view: self.start_voting_view,
-            stop_voting_view: self.stop_voting_view,
-            start_proposing_time: self.start_proposing_time,
-            stop_proposing_time: self.stop_proposing_time,
-            start_voting_time: self.start_voting_time,
-            stop_voting_time: self.stop_voting_time,
-            epoch_height: self.epoch_height,
-        }
-    }
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct PublicNetworkConfig {
-    rounds: usize,
-    indexed_da: bool,
-    transactions_per_round: usize,
-    manual_start_password: Option<String>,
-    num_bootrap: usize,
-    next_view_timeout: u64,
-    view_sync_timeout: Duration,
-    builder_timeout: Duration,
-    data_request_delay: Duration,
-    node_index: u64,
-    seed: [u8; 32],
-    transaction_size: usize,
-    key_type_name: String,
-    libp2p_config: Option<Libp2pConfig>,
-    config: PublicHotShotConfig,
-    cdn_marshal_address: Option<String>,
-    combined_network_config: Option<CombinedNetworkConfig>,
-    commit_sha: String,
-    builder: BuilderType,
-    random_builder: Option<RandomBuilderConfig>,
-}
-
-impl From<NetworkConfig<PubKey>> for PublicNetworkConfig {
-    fn from(cfg: NetworkConfig<PubKey>) -> Self {
-        Self {
-            rounds: cfg.rounds,
-            indexed_da: cfg.indexed_da,
-            transactions_per_round: cfg.transactions_per_round,
-            manual_start_password: Some("*****".into()),
-            num_bootrap: cfg.num_bootrap,
-            next_view_timeout: cfg.next_view_timeout,
-            view_sync_timeout: cfg.view_sync_timeout,
-            builder_timeout: cfg.builder_timeout,
-            data_request_delay: cfg.data_request_delay,
-            node_index: cfg.node_index,
-            seed: cfg.seed,
-            transaction_size: cfg.transaction_size,
-            key_type_name: cfg.key_type_name,
-            libp2p_config: cfg.libp2p_config,
-            config: cfg.config.into(),
-            cdn_marshal_address: cfg.cdn_marshal_address,
-            combined_network_config: cfg.combined_network_config,
-            commit_sha: cfg.commit_sha,
-            builder: cfg.builder,
-            random_builder: cfg.random_builder,
-        }
-    }
-}
-
-impl PublicNetworkConfig {
-    pub fn into_network_config(
-        self,
-        my_own_validator_config: ValidatorConfig<PubKey>,
-    ) -> anyhow::Result<NetworkConfig<PubKey>> {
-        let node_index = self
-            .config
-            .known_nodes_with_stake
-            .iter()
-            .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key)
-            .context(format!(
-                "the node {} is not in the stake table",
-                my_own_validator_config.public_key
-            ))? as u64;
-
-        Ok(NetworkConfig {
-            rounds: self.rounds,
-            indexed_da: self.indexed_da,
-            transactions_per_round: self.transactions_per_round,
-            manual_start_password: self.manual_start_password,
-            num_bootrap: self.num_bootrap,
-            next_view_timeout: self.next_view_timeout,
-            view_sync_timeout: self.view_sync_timeout,
-            builder_timeout: self.builder_timeout,
-            data_request_delay: self.data_request_delay,
-            node_index,
-            seed: self.seed,
-            transaction_size: self.transaction_size,
-            key_type_name: self.key_type_name,
-            libp2p_config: self.libp2p_config,
-            config: self.config.into_hotshot_config(),
-            cdn_marshal_address: self.cdn_marshal_address,
-            combined_network_config: self.combined_network_config,
-            commit_sha: self.commit_sha,
-            builder: self.builder,
-            random_builder: self.random_builder,
-            public_keys: Vec::new(),
-        })
-    }
-
-    pub fn hotshot_config(&self) -> PublicHotShotConfig {
-        self.config.clone()
-    }
-}
-
 #[cfg(any(test, feature = "testing"))]
 pub mod testing {
     use super::{super::Options, *};
diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs
index d0a3bac1c1..e85cc39d5a 100644
--- a/sequencer/src/bin/update-permissioned-stake-table.rs
+++ b/sequencer/src/bin/update-permissioned-stake-table.rs
@@ -1,10 +1,10 @@
 use anyhow::{Context, Result};
 use clap::Parser;
 use client::SequencerClient;
-use espresso_types::parse_duration;
+use espresso_types::{parse_duration, PublicNetworkConfig};
 use ethers::types::Address;
 use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType};
-use sequencer::api::data_source::PublicNetworkConfig;
+
 use sequencer_utils::{
     logging,
     stake_table::{update_stake_table, PermissionedStakeTableUpdate},
diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs
index 8c8ca7a667..99ea884bda 100644
--- a/sequencer/src/catchup.rs
+++ b/sequencer/src/catchup.rs
@@ -6,6 +6,7 @@ use async_trait::async_trait;
 use committable::Commitment;
 use committable::Committable;
 use espresso_types::traits::SequencerPersistence;
+use espresso_types::PublicNetworkConfig;
 use espresso_types::{
     v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount,
     FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState,
@@ -31,10 +32,8 @@ use tokio::time::timeout;
 use url::Url;
 use vbs::version::StaticVersionType;
 
-use crate::{
-    api::{data_source::PublicNetworkConfig, BlocksFrontier},
-    PubKey,
-};
+use crate::api::BlocksFrontier;
+use crate::PubKey;
 
 // This newtype is probably not worth having. It's only used to be able to log
 // URLs before doing requests.
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index f0abe66e0e..dc5f8e989d 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -1,14 +1,13 @@
 use anyhow::{anyhow, Context, Result};
 use client::SequencerClient;
-use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion, PubKey};
+use espresso_types::{FeeAmount, FeeVersion, MarketplaceVersion};
 use ethers::prelude::*;
 use futures::future::{join_all, BoxFuture};
 use futures::FutureExt;
 use hotshot_types::network::PeerConfigKeys;
 use hotshot_types::traits::signature_key::StakeTableEntryType;
-use hotshot_types::PeerConfig;
+
 use std::{fmt, str::FromStr, time::Duration};
-use surf_disco::http::convert::Deserialize;
 use surf_disco::Url;
 use tokio::time::{sleep, timeout};
 use vbs::version::StaticVersionType;
@@ -364,11 +363,11 @@ pub async fn test_stake_table_update(clients: Vec<SequencerClient>) -> Result<()> {
         .boxed()
     };
 
-    let config = client.config::<PublicNetworkConfig>().await?.config;
+    let config = client.config().await?.hotshot_config();
 
     // currently stake table update does not support DA node member changes
-    let stake_table = config.known_nodes_with_stake;
-    let da_members = config.known_da_nodes;
+    let stake_table = config.known_nodes_with_stake();
+    let da_members = config.known_da_nodes();
 
     // filtering out DA nodes
     let non_da_stakers: Vec<_> = stake_table
@@ -404,14 +403,3 @@ pub async fn test_stake_table_update(clients: Vec<SequencerClient>) -> Result<()> {
 
     Ok(())
 }
-
-#[derive(Debug, Deserialize)]
-struct PublicHotShotConfig {
-    known_nodes_with_stake: Vec<PeerConfig<PubKey>>,
-    known_da_nodes: Vec<PeerConfig<PubKey>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct PublicNetworkConfig {
-    config: PublicHotShotConfig,
-}
diff --git a/types/Cargo.toml b/types/Cargo.toml
index 7828de6db7..105849e075 100644
--- a/types/Cargo.toml
+++ b/types/Cargo.toml
@@ -57,6 +57,7 @@ tower-service = { version = "0.3", default-features = false }
 tracing = { workspace = true }
 url = { workspace = true }
 vbs = { workspace = true }
+vec1 = { workspace = true }
 
 [dev-dependencies]
 espresso-types = { path = ".", features = [ "testing" ] }
diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs
index 56bc560582..12440da3d7 100644
--- a/types/src/v0/mod.rs
+++ b/types/src/v0/mod.rs
@@ -120,6 +120,9 @@ reexport_unchanged_types!(
     TimeBasedUpgrade,
     ViewBasedUpgrade,
     BlockSize,
+    PublicHotShotConfig,
+    PublicNetworkConfig,
+    PublicValidatorConfig
 );
 
 pub(crate) use v0_3::{L1ClientMetrics, L1Event, L1State, L1UpdateTask};
diff --git a/types/src/v0/v0_1/config.rs b/types/src/v0/v0_1/config.rs
new file mode 100644
index 0000000000..4f52574f20
--- /dev/null
+++ b/types/src/v0/v0_1/config.rs
@@ -0,0 +1,262 @@
+use std::{num::NonZeroUsize, time::Duration};
+
+use anyhow::Context;
+use vec1::Vec1;
+
+use crate::PubKey;
+use hotshot_types::network::{
+    BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig,
+};
+use hotshot_types::{network::NetworkConfig, HotShotConfig, PeerConfig, ValidatorConfig};
+use serde::{Deserialize, Serialize};
+use tide_disco::Url;
+
+/// This struct defines the public Hotshot validator configuration.
+/// Private key and state key pairs are excluded for security reasons.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct PublicValidatorConfig {
+    public_key: PubKey,
+    stake_value: u64,
+    is_da: bool,
+    private_key: String,
+    state_public_key: String,
+    state_key_pair: String,
+}
+
+impl From<ValidatorConfig<PubKey>> for PublicValidatorConfig {
+    fn from(v: ValidatorConfig<PubKey>) -> Self {
+        let ValidatorConfig::<PubKey> {
+            public_key,
+            private_key: _,
+            stake_value,
+            state_key_pair,
+            is_da,
+        } = v;
+
+        let state_public_key = state_key_pair.ver_key();
+
+        Self {
+            public_key,
+            stake_value,
+            is_da,
+            state_public_key: state_public_key.to_string(),
+            private_key: "*****".into(),
+            state_key_pair: "*****".into(),
+        }
+    }
+}
+
+/// This struct defines the public Hotshot configuration parameters.
+/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters.
+/// Hotshot config has sensitive information like private keys and such fields are excluded from this struct.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct PublicHotShotConfig {
+    start_threshold: (u64, u64),
+    num_nodes_with_stake: NonZeroUsize,
+    known_nodes_with_stake: Vec<PeerConfig<PubKey>>,
+    known_da_nodes: Vec<PeerConfig<PubKey>>,
+    da_staked_committee_size: usize,
+    fixed_leader_for_gpuvid: usize,
+    next_view_timeout: u64,
+    view_sync_timeout: Duration,
+    num_bootstrap: usize,
+    builder_timeout: Duration,
+    data_request_delay: Duration,
+    builder_urls: Vec1<Url>,
+    start_proposing_view: u64,
+    stop_proposing_view: u64,
+    start_voting_view: u64,
+    stop_voting_view: u64,
+    start_proposing_time: u64,
+    stop_proposing_time: u64,
+    start_voting_time: u64,
+    stop_voting_time: u64,
+    epoch_height: u64,
+}
+
+impl From<HotShotConfig<PubKey>> for PublicHotShotConfig {
+    fn from(v: HotShotConfig<PubKey>) -> Self {
+        // Destructure all fields from HotShotConfig to return an error
+        // if new fields are added to HotShotConfig. This makes sure that we handle
+        // all fields appropriately and do not miss any updates.
+        let HotShotConfig::<PubKey> {
+            start_threshold,
+            num_nodes_with_stake,
+            known_nodes_with_stake,
+            known_da_nodes,
+            da_staked_committee_size,
+            fixed_leader_for_gpuvid,
+            next_view_timeout,
+            view_sync_timeout,
+            num_bootstrap,
+            builder_timeout,
+            data_request_delay,
+            builder_urls,
+            start_proposing_view,
+            stop_proposing_view,
+            start_voting_view,
+            stop_voting_view,
+            start_proposing_time,
+            stop_proposing_time,
+            start_voting_time,
+            stop_voting_time,
+            epoch_height,
+        } = v;
+
+        Self {
+            start_threshold,
+            num_nodes_with_stake,
+            known_nodes_with_stake,
+            known_da_nodes,
+            da_staked_committee_size,
+            fixed_leader_for_gpuvid,
+            next_view_timeout,
+            view_sync_timeout,
+            num_bootstrap,
+            builder_timeout,
+            data_request_delay,
+            builder_urls,
+            start_proposing_view,
+            stop_proposing_view,
+            start_voting_view,
+            stop_voting_view,
+            start_proposing_time,
+            stop_proposing_time,
+            start_voting_time,
+            stop_voting_time,
+            epoch_height,
+        }
+    }
+}
+
+impl PublicHotShotConfig {
+    pub fn into_hotshot_config(self) -> HotShotConfig<PubKey> {
+        HotShotConfig {
+            start_threshold: self.start_threshold,
+            num_nodes_with_stake: self.num_nodes_with_stake,
+            known_nodes_with_stake: self.known_nodes_with_stake,
+            known_da_nodes: self.known_da_nodes,
+            da_staked_committee_size: self.da_staked_committee_size,
+            fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid,
+            next_view_timeout: self.next_view_timeout,
+            view_sync_timeout: self.view_sync_timeout,
+            num_bootstrap: self.num_bootstrap,
+            builder_timeout: self.builder_timeout,
+            data_request_delay: self.data_request_delay,
+            builder_urls: self.builder_urls,
+            start_proposing_view: self.start_proposing_view,
+            stop_proposing_view: self.stop_proposing_view,
+            start_voting_view: self.start_voting_view,
+            stop_voting_view: self.stop_voting_view,
+            start_proposing_time: self.start_proposing_time,
+            stop_proposing_time: self.stop_proposing_time,
+            start_voting_time: self.start_voting_time,
+            stop_voting_time: self.stop_voting_time,
+            epoch_height: self.epoch_height,
+        }
+    }
+
+    pub fn known_nodes_with_stake(&self) -> Vec<PeerConfig<PubKey>> {
+        self.known_nodes_with_stake.clone()
+    }
+
+    pub fn known_da_nodes(&self) -> Vec<PeerConfig<PubKey>> {
+        self.known_da_nodes.clone()
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct PublicNetworkConfig {
+    rounds: usize,
+    indexed_da: bool,
+    transactions_per_round: usize,
+    manual_start_password: Option<String>,
+    num_bootrap: usize,
+    next_view_timeout: u64,
+    view_sync_timeout: Duration,
+    builder_timeout: Duration,
+    data_request_delay: Duration,
+    node_index: u64,
+    seed: [u8; 32],
+    transaction_size: usize,
+    key_type_name: String,
+    libp2p_config: Option<Libp2pConfig>,
+    config: PublicHotShotConfig,
+    cdn_marshal_address: Option<String>,
+    combined_network_config: Option<CombinedNetworkConfig>,
+    commit_sha: String,
+    builder: BuilderType,
+    random_builder: Option<RandomBuilderConfig>,
+}
+
+impl From<NetworkConfig<PubKey>> for PublicNetworkConfig {
+    fn from(cfg: NetworkConfig<PubKey>) -> Self {
+        Self {
+            rounds: cfg.rounds,
+            indexed_da: cfg.indexed_da,
+            transactions_per_round: cfg.transactions_per_round,
+            manual_start_password: Some("*****".into()),
+            num_bootrap: cfg.num_bootrap,
+            next_view_timeout: cfg.next_view_timeout,
+            view_sync_timeout: cfg.view_sync_timeout,
+            builder_timeout: cfg.builder_timeout,
+            data_request_delay: cfg.data_request_delay,
+            node_index: cfg.node_index,
+            seed: cfg.seed,
+            transaction_size: cfg.transaction_size,
+            key_type_name: cfg.key_type_name,
+            libp2p_config: cfg.libp2p_config,
+            config: cfg.config.into(),
+            cdn_marshal_address: cfg.cdn_marshal_address,
+            combined_network_config: cfg.combined_network_config,
+            commit_sha: cfg.commit_sha,
+            builder: cfg.builder,
+            random_builder: cfg.random_builder,
+        }
+    }
+}
+
+impl PublicNetworkConfig {
+    pub fn into_network_config(
+        self,
+        my_own_validator_config: ValidatorConfig<PubKey>,
+    ) -> anyhow::Result<NetworkConfig<PubKey>> {
+        let node_index = self
+            .config
+            .known_nodes_with_stake
+            .iter()
+            .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key)
+            .context(format!(
+                "the node {} is not in the stake table",
+                my_own_validator_config.public_key
+            ))? as u64;
+
+        Ok(NetworkConfig {
+            rounds: self.rounds,
+            indexed_da: self.indexed_da,
+            transactions_per_round: self.transactions_per_round,
+            manual_start_password: self.manual_start_password,
+            num_bootrap: self.num_bootrap,
+            next_view_timeout: self.next_view_timeout,
+            view_sync_timeout: self.view_sync_timeout,
+            builder_timeout: self.builder_timeout,
+            data_request_delay: self.data_request_delay,
+            node_index,
+            seed: self.seed,
+            transaction_size: self.transaction_size,
+            key_type_name: self.key_type_name,
+            libp2p_config: self.libp2p_config,
+            config: self.config.into_hotshot_config(),
+            cdn_marshal_address: self.cdn_marshal_address,
+            combined_network_config: self.combined_network_config,
+            commit_sha: self.commit_sha,
+            builder: self.builder,
+            random_builder: self.random_builder,
+            public_keys: Vec::new(),
+        })
+    }
+
+    pub fn hotshot_config(&self) -> PublicHotShotConfig {
+        self.config.clone()
+    }
+}
diff --git a/types/src/v0/v0_1/mod.rs b/types/src/v0/v0_1/mod.rs
index 7115342c14..494659e1be 100644
--- a/types/src/v0/v0_1/mod.rs
+++ b/types/src/v0/v0_1/mod.rs
@@ -4,6 +4,7 @@ pub const VERSION: Version = Version { major: 0, minor: 1 };
 
 mod block;
 mod chain_config;
+mod config;
 mod fee_info;
 mod header;
 mod instance_state;
@@ -14,6 +15,7 @@ mod transaction;
 
 pub use block::*;
 pub use chain_config::*;
+pub use config::*;
 pub use fee_info::*;
 pub use header::Header;
 pub use instance_state::*;
diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs
index b550f2c5a7..5d8acb79f4 100644
--- a/types/src/v0/v0_2/mod.rs
+++ b/types/src/v0/v0_2/mod.rs
@@ -4,10 +4,11 @@ use vbs::version::Version;
 pub use super::v0_1::{
     AccountQueryData, BlockMerkleCommitment, BlockMerkleTree, BlockSize, BuilderSignature,
     ChainConfig, ChainId, Delta, FeeAccount, FeeAccountProof, FeeAmount, FeeInfo,
-    FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions,
-    L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen,
-    NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError,
-    NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen, ResolvableChainConfig,
+    FeeMerkleCommitment, FeeMerkleProof, FeeMerkleTree, Header, Index, Iter, L1BlockInfo, L1Client,
+    L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder,
+    NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder,
+    NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen,
+    PublicHotShotConfig, PublicNetworkConfig, PublicValidatorConfig, ResolvableChainConfig,
     TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof,
     TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade,
     BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN,
diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs
index ec59b858b4..b96fb014a9 100644
--- a/types/src/v0/v0_3/mod.rs
+++ b/types/src/v0/v0_3/mod.rs
@@ -7,8 +7,9 @@ pub use super::v0_1::{
     FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId,
     NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange,
     NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange,
-    NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter,
-    TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
+    NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig,
+    PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload,
+    TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
     UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT,
     NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
 };
diff --git a/types/src/v0/v0_99/mod.rs b/types/src/v0/v0_99/mod.rs
index 3e676a4b2a..f45ce0bd5d 100644
--- a/types/src/v0/v0_99/mod.rs
+++ b/types/src/v0/v0_99/mod.rs
@@ -7,8 +7,9 @@ pub use super::v0_1::{
     FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId,
     NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange,
     NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange,
-    NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter,
-    TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
+    NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig,
+    PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload,
+    TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
     UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT,
     NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
 };
From e18cb5a4da72004632b0746162b81f5bd2dabaad Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Wed, 26 Feb 2025 14:31:44 +0500
Subject: [PATCH 104/120] fix randomized committee preloading

---
 sequencer-sqlite/Cargo.lock                          | 2 ++
 sequencer/src/api.rs                                 | 6 +++++-
 sequencer/src/bin/update-permissioned-stake-table.rs | 4 ++--
 types/src/v0/impls/stake_table.rs                    | 2 +-
 4 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/sequencer-sqlite/Cargo.lock b/sequencer-sqlite/Cargo.lock
index e4b942b022..62b192f64a 100644
--- a/sequencer-sqlite/Cargo.lock
+++ b/sequencer-sqlite/Cargo.lock
@@ -2388,6 +2388,7 @@ dependencies = [
  "futures",
  "hotshot-types",
  "jf-merkle-tree",
+ "serde_json",
  "surf-disco",
  "tokio",
  "tracing",
@@ -3579,6 +3580,7 @@ dependencies = [
 "tracing",
 "url",
 "vbs",
+ "vec1",
]

[[package]]
diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index 2c2a4fa246..d637767621 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -1610,7 +1610,11 @@ mod test {
     use tokio::time::sleep;
 
     use espresso_types::{
-        traits::NullEventConsumer, v0_1::{UpgradeMode, ViewBasedUpgrade}, BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions, PublicHotShotConfig, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState
+        traits::NullEventConsumer,
+        v0_1::{UpgradeMode, ViewBasedUpgrade},
+        BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions,
+        PublicHotShotConfig, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType,
+        ValidatedState,
     };
     use ethers::utils::Anvil;
     use futures::{
diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs
index e85cc39d5a..a966a30f22 100644
--- a/sequencer/src/bin/update-permissioned-stake-table.rs
+++ b/sequencer/src/bin/update-permissioned-stake-table.rs
@@ -1,7 +1,7 @@
 use anyhow::{Context, Result};
 use clap::Parser;
 use client::SequencerClient;
-use espresso_types::{parse_duration, PublicNetworkConfig};
+use espresso_types::parse_duration;
 use ethers::types::Address;
 use hotshot_types::{network::PeerConfigKeys, traits::signature_key::StakeTableEntryType};
 
@@ -109,7 +109,7 @@ async fn main() -> Result<()> {
 
     for client in &clients {
         tracing::warn!("calling config endpoint of {client:?}");
-        match client.config::<PublicNetworkConfig>().await {
+        match client.config().await {
             Ok(config) => {
                 let hotshot = config.hotshot_config().into_hotshot_config();
                 let st = hotshot.known_nodes_with_stake;
diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs
index 49d164badb..da2a90662b 100644
--- a/types/src/v0/impls/stake_table.rs
+++ b/types/src/v0/impls/stake_table.rs
@@ -207,7 +207,7 @@ impl EpochCommittees {
         self.randomized_committees
             .insert(epoch + 1, randomized_committee.clone());
         self.randomized_committees
-            .insert(epoch + 1, randomized_committee.clone());
+            .insert(epoch + 2, randomized_committee.clone());
 
         committee
     }
From 9bc5e575e45e155eda1deeadc72856e4c6d059f4 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Wed, 26 Feb 2025 17:10:38 +0500
Subject: [PATCH 105/120] assert upgrade 10 views after upgrade proposal

---
 sequencer/src/api.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index d637767621..c76207e192 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -2357,7 +2357,7 @@ mod test {
             // ChainConfigs will eventually be resolved
             if let Some(configs) = configs {
                 tracing::info!(?configs, "configs");
-                if height > new_version_first_view {
+                if height > new_version_first_view + 10 {
                     for config in configs {
                         assert_eq!(config, chain_config_upgrade);
                     }
From 6f0790824558504358cacbfe1aa534ce8806e03f Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Thu, 27 Feb 2025 03:13:12 +0500
Subject: [PATCH 106/120] fix: off by one epoch for getting drb results

---
 client/Cargo.toml                                  | 2 +-
 hotshot-task-impls/src/quorum_proposal/handlers.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/client/Cargo.toml b/client/Cargo.toml
index e4d6d51ab5..fb65f0ce4b 100644
--- a/client/Cargo.toml
+++ b/client/Cargo.toml
@@ -12,8 +12,8 @@ ethers = { workspace = true }
 futures = { workspace = true }
 hotshot-types = { workspace = true }
 jf-merkle-tree = { workspace = true }
+serde_json = { workspace = true }
 surf-disco = { workspace = true }
 tokio = { workspace = true }
 tracing = { workspace = true }
 vbs = { workspace = true }
-serde_json = { workspace = true }
\ No newline at end of file
diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs
index 6e4e73d671..22c24c85e5 100644
--- a/hotshot-task-impls/src/quorum_proposal/handlers.rs
+++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs
@@ -407,7 +407,7 @@ impl<TYPES: NodeType, V: Versions> ProposalDependencyHandle<TYPES, V> {
                 .await
                 .drb_seeds_and_results
                 .results
-                .get(epoch_val)
+                .get(&(*epoch_val + 1))
                 .copied()
         } else {
             None
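For context on the one-line change above: as the subject line says, the DRB lookup was off by one epoch. Below is a minimal, self-contained sketch of the corrected indexing; `DrbResults` is a hypothetical stand-in for the real `drb_seeds_and_results.results` map on the shared consensus state, and the point is only that a proposal built while the epoch is `e` must read the result keyed by `e + 1`, not by `e` itself.

    use std::collections::BTreeMap;

    /// Hypothetical stand-in: DRB results keyed by the epoch they seed.
    type DrbResults = BTreeMap<u64, [u8; 32]>;

    /// A proposal built while `epoch_val` is `e` must read the result stored
    /// under `e + 1`; keying the lookup by `e` itself is the bug fixed above.
    fn drb_result_for_proposal(results: &DrbResults, epoch_val: u64) -> Option<[u8; 32]> {
        results.get(&(epoch_val + 1)).copied()
    }

    fn main() {
        let mut results = DrbResults::new();
        results.insert(5, [42; 32]); // result seeded for epoch 5

        // Proposing during epoch 4 finds the epoch-5 result...
        assert_eq!(drb_result_for_proposal(&results, 4), Some([42; 32]));
        // ...whereas the pre-fix lookup, keyed by epoch 4, would find nothing.
        assert!(results.get(&4).is_none());
    }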
From d18c6bc74642e19a9695d29f138e76d6e02fb8c4 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Mar 2025 03:18:00 +0500 Subject: [PATCH 107/120] fixes after merging main --- builder/src/lib.rs | 1 + client/src/lib.rs | 4 +- flake.nix | 2 +- hotshot-builder-core/src/service.rs | 5 +- .../examples/simple-server.rs | 1 + .../src/data_source/storage/sql.rs | 1 - sequencer-sqlite/Cargo.lock | 2 + sequencer/src/api.rs | 11 +- sequencer/src/api/data_source.rs | 9 +- sequencer/src/catchup.rs | 1 - sequencer/src/persistence.rs | 13 +- sequencer/src/persistence/sql.rs | 1 - types/src/v0/impls/stake_table.rs | 85 +++--- types/src/v0/mod.rs | 3 - types/src/v0/v0_1/config.rs | 262 ------------------ types/src/v0/v0_1/mod.rs | 2 - types/src/v0/v0_2/mod.rs | 9 +- types/src/v0/v0_3/header.rs | 4 +- types/src/v0/v0_3/mod.rs | 5 +- types/src/v0/v0_99/mod.rs | 5 +- 20 files changed, 78 insertions(+), 348 deletions(-) delete mode 100644 types/src/v0/v0_1/config.rs diff --git a/builder/src/lib.rs b/builder/src/lib.rs index c36e5d2034..f1e22d9f30 100755 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -139,6 +139,7 @@ pub mod testing { stop_proposing_time: 0, stop_voting_time: 0, epoch_height: 150, + epoch_start_block: 0, }; Self { diff --git a/client/src/lib.rs b/client/src/lib.rs index 2370201787..aecf930e70 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -1,5 +1,7 @@ use anyhow::Context; -use espresso_types::{FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey, PublicNetworkConfig}; +use espresso_types::{ + config::PublicNetworkConfig, FeeAccount, FeeAmount, FeeMerkleTree, Header, PubKey, +}; use ethers::types::Address; use futures::{stream::BoxStream, StreamExt}; use hotshot_types::stake_table::StakeTableEntry; diff --git a/flake.nix b/flake.nix index d4843aa195..f6f5f83242 100644 --- a/flake.nix +++ b/flake.nix @@ -316,4 +316,4 @@ ]; }); }); -} \ No newline at end of file +} diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index 20beb9bfe0..a45c4039a0 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -1558,10 +1558,7 @@ mod test { use hotshot_types::{ data::{vid_commitment, Leaf, ViewNumber}, message::Proposal, - traits::{ - block_contents::vid_commitment, node_implementation::ConsensusTime, - signature_key::BuilderSignatureKey, - }, + traits::{node_implementation::ConsensusTime, signature_key::BuilderSignatureKey}, utils::BuilderCommitment, }; use marketplace_builder_shared::{ diff --git a/hotshot-query-service/examples/simple-server.rs b/hotshot-query-service/examples/simple-server.rs index 50bb9c26db..fc17d47079 100644 --- a/hotshot-query-service/examples/simple-server.rs +++ b/hotshot-query-service/examples/simple-server.rs @@ -217,6 +217,7 @@ async fn init_consensus( start_voting_time: 0, stop_voting_time: 0, epoch_height: 150, + epoch_start_block: 0, }; let nodes = join_all(priv_keys.into_iter().zip(data_sources).enumerate().map( diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index 0143f4501b..b12cb5efbb 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -37,7 +37,6 @@ use log::LevelFilter; #[cfg(not(feature = "embedded-db"))] use futures::future::FutureExt; -use serde_json::Value; #[cfg(not(feature = "embedded-db"))] use sqlx::postgres::{PgConnectOptions, PgSslMode}; #[cfg(feature = "embedded-db")] diff --git a/sequencer-sqlite/Cargo.lock 
b/sequencer-sqlite/Cargo.lock
index 53578632ee..be032c682a 100644
--- a/sequencer-sqlite/Cargo.lock
+++ b/sequencer-sqlite/Cargo.lock
@@ -2386,7 +2386,9 @@ dependencies = [
  "espresso-types",
  "ethers",
  "futures",
+ "hotshot-types",
  "jf-merkle-tree 0.1.0",
+ "serde_json",
  "surf-disco",
  "tokio",
  "tracing",
diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs
index 134dfc1d83..3b47bf520a 100644
--- a/sequencer/src/api.rs
+++ b/sequencer/src/api.rs
@@ -174,7 +174,7 @@ impl<N: ConnectedNetwork<PubKey>, D: Sync, V: Versions, P: SequencerPersistence>
     async fn get_da_members(
         &self,
         epoch: Option<<SeqTypes as NodeType>::Epoch>,
-    ) -> Vec<StakeTableEntry<<SeqTypes as NodeType>::SignatureKey>> {
+    ) -> Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>> {
         self.as_ref().get_da_members(epoch).await
     }
 
@@ -188,7 +188,7 @@ impl<N: ConnectedNetwork<PubKey>, D: Sync, V: Versions, P: SequencerPersistence>
     /// Get the stake table for the current epoch if not provided
     async fn get_da_members_current(
         &self,
-    ) -> Vec<StakeTableEntry<<SeqTypes as NodeType>::SignatureKey>> {
+    ) -> Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>> {
         self.as_ref().get_da_members_current().await
     }
 
@@ -231,7 +231,7 @@ impl<N: ConnectedNetwork<PubKey>, V: Versions, P: SequencerPersistence>
     async fn get_da_members(
         &self,
         epoch: Option<<SeqTypes as NodeType>::Epoch>,
-    ) -> Vec<StakeTableEntry<<SeqTypes as NodeType>::SignatureKey>> {
+    ) -> Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>> {
         self.consensus()
             .await
             .read()
@@ -245,7 +245,7 @@ impl<N: ConnectedNetwork<PubKey>, V: Versions, P: SequencerPersistence>
     /// Get the stake table for the current epoch if not provided
     async fn get_da_members_current(
         &self,
-    ) -> Vec<StakeTableEntry<<SeqTypes as NodeType>::SignatureKey>> {
+    ) -> Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>> {
         let epoch = self.consensus().await.read().await.cur_epoch().await;
 
         self.get_da_members(epoch).await
@@ -1619,8 +1619,7 @@ mod test {
         traits::NullEventConsumer,
         v0_1::{UpgradeMode, ViewBasedUpgrade},
         BackoffParams, FeeAccount, FeeAmount, Header, MarketplaceVersion, MockSequencerVersions,
-        PublicHotShotConfig, SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType,
-        ValidatedState,
+        SequencerVersions, TimeBasedUpgrade, Timestamp, Upgrade, UpgradeType, ValidatedState,
     };
     use ethers::utils::Anvil;
     use futures::{
diff --git a/sequencer/src/api/data_source.rs b/sequencer/src/api/data_source.rs
index 40a8ee93ef..e61eebebb4 100644
--- a/sequencer/src/api/data_source.rs
+++ b/sequencer/src/api/data_source.rs
@@ -5,8 +5,7 @@ use espresso_types::{
     config::PublicNetworkConfig,
     v0::traits::{PersistenceOptions, SequencerPersistence},
     v0_99::ChainConfig,
-    FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, PublicNetworkConfig,
-    Transaction,
+    FeeAccount, FeeAccountProof, FeeMerkleTree, NodeState, PubKey, Transaction,
 };
 use futures::future::Future;
 use hotshot_query_service::{
@@ -119,18 +118,18 @@ pub(crate) trait StakeTableDataSource<SeqTypes: NodeType> {
     /// Get the stake table for the current epoch if not provided
     fn get_stake_table_current(
         &self,
-    ) -> impl Send + Future<Output = Vec<StakeTableEntry<PubKey>>>;
+    ) -> impl Send + Future<Output = Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>>>;
 
     fn get_current_epoch(&self) -> impl Send + Future<Output = Option<EpochNumber>>;
 
     fn get_da_members(
         &self,
         epoch: Option<<SeqTypes as NodeType>::Epoch>,
-    ) -> impl Send + Future<Output = Vec<StakeTableEntry<PubKey>>>;
+    ) -> impl Send + Future<Output = Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>>>;
 
     /// Get the stake table for the current epoch if not provided
     fn get_da_members_current(
         &self,
-    ) -> impl Send + Future<Output = Vec<StakeTableEntry<PubKey>>>;
+    ) -> impl Send + Future<Output = Vec<PeerConfig<<SeqTypes as NodeType>::SignatureKey>>>;
 }
 
 pub(crate) trait CatchupDataSource: Sync {
diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs
index ae5cf7d43e..7e64d2a4ca 100644
--- a/sequencer/src/catchup.rs
+++ b/sequencer/src/catchup.rs
@@ -7,7 +7,6 @@ use committable::Commitment;
 use committable::Committable;
 use espresso_types::config::PublicNetworkConfig;
 use espresso_types::traits::SequencerPersistence;
-use espresso_types::PublicNetworkConfig;
 use espresso_types::{
     v0::traits::StateCatchup, v0_99::ChainConfig, BackoffParams, BlockMerkleTree, FeeAccount,
     FeeAccountProof, FeeMerkleCommitment, FeeMerkleTree, Leaf2, NodeState,
diff --git a/sequencer/src/persistence.rs b/sequencer/src/persistence.rs
index 6d8661a6ed..58ca0a461f 100644
--- a/sequencer/src/persistence.rs
+++ b/sequencer/src/persistence.rs
@@ -43,7 +43,8 @@ mod testing {
 #[cfg(test)]
 #[espresso_macros::generic_tests]
 mod persistence_tests {
-    use std::{collections::BTreeMap, marker::PhantomData};
+    use sequencer_utils::test_utils::setup_test;
+    use std::{collections::BTreeMap, marker::PhantomData, sync::Arc};
     use vbs::version::StaticVersionType;
 
     use anyhow::bail;
@@ -77,7 +78,7 @@ mod persistence_tests {
         vote::HasViewNumber,
     };
 
-    use super::*;
+    use super::{testing::TestablePersistence, *};
     use vbs::version::Version;
 
     #[derive(Clone, Debug, Default)]
@@ -227,7 +228,7 @@ mod persistence_tests {
             Some(convert_proposal(vid_share0.clone()))
         );
 
-        vid.set_view_number(ViewNumber::new(1));
+        vid.view_number = ViewNumber::new(1);
         let vid_share1 = vid.clone().to_proposal(&privkey).unwrap().clone();
 
         storage.append_vid2(&vid_share1).await.unwrap();
@@ -237,7 +238,7 @@ mod persistence_tests {
             Some(convert_proposal(vid_share1.clone()))
         );
 
-        vid.set_view_number(ViewNumber::new(2));
+        vid.view_number = ViewNumber::new(2);
         let vid_share2 = vid.clone().to_proposal(&privkey).unwrap().clone();
 
         storage.append_vid2(&vid_share2).await.unwrap();
@@ -247,7 +248,7 @@ mod persistence_tests {
             Some(convert_proposal(vid_share2.clone()))
         );
 
-        vid.set_view_number(ViewNumber::new(3));
+        vid.view_number = ViewNumber::new(3);
         let vid_share3 = vid.clone().to_proposal(&privkey).unwrap().clone();
 
         storage.append_vid2(&vid_share3).await.unwrap();
@@ -742,7 +743,7 @@ mod persistence_tests {
         let leaf = Leaf2::from_quorum_proposal(&quorum_proposal);
         qc.view_number = leaf.view_number();
         qc.data.leaf_commit = Committable::commit(&leaf);
-        vid.data.set_view_number(leaf.view_number());
+        vid.data.view_number = leaf.view_number();
         da_proposal.data.view_number = leaf.view_number();
         chain.push((leaf.clone(), qc.clone(), vid.clone(), da_proposal.clone()));
     }
diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs
index d87909df1e..e2383a81ca 100644
--- a/sequencer/src/persistence/sql.rs
+++ b/sequencer/src/persistence/sql.rs
@@ -49,7 +49,6 @@ use hotshot_types::{
     vote::HasViewNumber,
 };
 use itertools::Itertools;
-use jf_vid::VidScheme;
 use sqlx::Row;
 use sqlx::{query, Executor};
 use std::{collections::BTreeMap, path::PathBuf, str::FromStr, sync::Arc, time::Duration};
diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs
index 8aadb49c19..755e373d65 100644
--- a/types/src/v0/impls/stake_table.rs
+++ b/types/src/v0/impls/stake_table.rs
@@ -2,6 +2,7 @@ use std::{
     cmp::max,
     collections::{BTreeMap, BTreeSet, HashMap},
     num::NonZeroU64,
+    sync::Arc,
 };
 
 use async_trait::async_trait;
@@ -25,14 +26,15 @@ use hotshot_types::{
     },
     PeerConfig,
 };
-use itertools::Itertools;
+use indexmap::IndexMap;
 use thiserror::Error;
 
 use super::{
+    traits::StateCatchup,
     v0_3::{DAMembers, StakeTable, StakeTables},
+    v0_99::ChainConfig,
     Header, L1Client, NodeState, PubKey, SeqTypes,
 };
-use thiserror::Error;
 
 type Epoch = <SeqTypes as NodeType>::Epoch;
@@ -71,10 +73,13 @@ impl StakeTables {
 
         for change in index_map.values() {
             if let StakeTableChange::Add(node_info_jf) = change {
-                let entry: StakeTableEntry<PubKey> = node_info_jf.clone().into();
-                stake_table.push(entry.clone());
+                let config = PeerConfig {
+                    stake_table_entry: node_info_jf.clone().into(),
+                    state_ver_key: node_info_jf.state_ver_key.clone(),
+                };
+                stake_table.push(config.clone());
                 if change.is_da() {
-                    da_members.push(entry);
+                    da_members.push(config);
                 }
             }
         }
@@ -162,7 +167,7 @@ impl EpochCommittees {
     /// to be called before calling `self.stake()` so that
     /// `Self.stake_table` only needs to be updated once in a given
     /// life-cycle but may be read from many times.
-    fn update_stake_table(&mut self, epoch: EpochNumber, st: StakeTables) -> Committee {
+    fn update_stake_table(&mut self, epoch: EpochNumber, st: StakeTables) {
         // This works because `get_stake_table` is fetching *all*
         // update events and building the table for us. We will need
         // more subtlety when start fetching only the events since last update.
@@ -201,7 +206,14 @@ impl EpochCommittees {
             .filter(|peer_config| peer_config.stake_table_entry.stake() > U256::zero())
             .collect();
 
-        let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]);
+        let randomized_committee = generate_stake_cdf(
+            eligible_leaders
+                .clone()
+                .into_iter()
+                .map(|l| l.stake_table_entry)
+                .collect(),
+            [0u8; 32],
+        );
 
         let committee = Committee {
             eligible_leaders,
@@ -220,8 +232,6 @@ impl EpochCommittees {
             .insert(epoch + 1, randomized_committee.clone());
         self.randomized_committees
             .insert(epoch + 2, randomized_committee.clone());
-
-        committee
     }
 
     // We need a constructor to match our concrete type.
@@ -275,7 +285,14 @@ impl EpochCommittees {
             )
             })
             .collect();
-        let randomized_committee = generate_stake_cdf(eligible_leaders.clone(), [0u8; 32]);
+        let randomized_committee = generate_stake_cdf(
+            eligible_leaders
+                .clone()
+                .into_iter()
+                .map(|l| l.stake_table_entry)
+                .collect(),
+            [0u8; 32],
+        );
 
         let members = Committee {
             eligible_leaders,
@@ -302,6 +319,7 @@ impl EpochCommittees {
             chain_config: instance_state.chain_config,
             peers: instance_state.peers.clone(),
             randomized_committees,
+            initial_drb_result: None,
         }
     }
 
@@ -331,26 +349,20 @@ impl Membership<SeqTypes> for EpochCommittees {
         panic!("EpochCommittees::new() called. This function has been replaced with new_stake()");
     }
 
     /// Get the stake table for the current view
-    fn stake_table(&self, epoch: Option<Epoch>) -> Vec<StakeTableEntry<PubKey>> {
-        let st = if let Some(st) = self.state(&epoch) {
+    fn stake_table(&self, epoch: Option<Epoch>) -> Vec<PeerConfig<PubKey>> {
+        if let Some(st) = self.state(&epoch) {
             st.stake_table.clone()
         } else {
             vec![]
-        };
-
-        tracing::debug!("stake table = {st:?}");
-        st
+        }
     }
 
     /// Get the stake table for the current view
-    fn da_stake_table(&self, epoch: Option<Epoch>) -> Vec<StakeTableEntry<PubKey>> {
-        let da = if let Some(sc) = self.state(&epoch) {
+    fn da_stake_table(&self, epoch: Option<Epoch>) -> Vec<PeerConfig<PubKey>> {
+        if let Some(sc) = self.state(&epoch) {
             sc.da_members.clone()
         } else {
             vec![]
-        };
-
-        tracing::debug!("da members = {da:?}");
-        da
+        }
     }
 
     /// Get all members of the committee for the current view
@@ -359,15 +371,11 @@
         _view_number: <SeqTypes as NodeType>::View,
         epoch: Option<Epoch>,
     ) -> BTreeSet<PubKey> {
-        let committee = if let Some(sc) = self.state(&epoch) {
+        if let Some(sc) = self.state(&epoch) {
             sc.indexed_stake_table.clone().into_keys().collect()
         } else {
             BTreeSet::new()
-        };
-
-        tracing::debug!("committee={committee:?}");
-
-        committee
+        }
     }
 
     /// Get all members of the committee for the current view
@@ -376,14 +384,11 @@
         _view_number: <SeqTypes as NodeType>::View,
         epoch: Option<Epoch>,
     ) -> BTreeSet<PubKey> {
-        let da = if let Some(sc) = self.state(&epoch) {
+        if let Some(sc) = self.state(&epoch) {
             sc.indexed_da_members.clone().into_keys().collect()
         } else {
             BTreeSet::new()
-        };
-        tracing::debug!("da committee={da:?}");
-
-        da
+        }
     }
 
     /// Get all eligible leaders of the committee for the current view
@@ -392,16 +397,12 @@
         _view_number: <SeqTypes as NodeType>::View,
         epoch: Option<Epoch>,
     ) -> BTreeSet<PubKey> {
-        let committee_leaders = self
-            .state(&epoch)
+        self.state(&epoch)
             .unwrap()
             .eligible_leaders
             .iter()
-            .map(PubKey::public_key)
-            .collect();
-
-        tracing::debug!("committee_leaders={committee_leaders:?}");
-        committee_leaders
+            .map(|x| PubKey::public_key(&x.stake_table_entry))
+            .collect()
     }
 
     /// Get the stake table entry for a public key
@@ -522,12 +523,12 @@ impl Membership<SeqTypes> for EpochCommittees {
         let address = contract_address?;
 
         self.l1_client
-            .get_stake_table(address.to_alloy(), block_header.l1_head())
+            .get_stake_table(address.to_alloy(), block_header.height())
            .await
            .ok()
            .map(|stake_table| -> Box<dyn FnOnce(&mut Self) + Send> {
                Box::new(move |committee: &mut Self| {
-                    let _ = committee.update_stake_table(epoch, stake_table);
+                    committee.update_stake_table(epoch, stake_table);
                })
            })
    }
diff --git a/types/src/v0/mod.rs b/types/src/v0/mod.rs
index ddd16ba3be..dd1cc837ad 100644
--- a/types/src/v0/mod.rs
+++ b/types/src/v0/mod.rs
@@ -121,9 +121,6 @@ reexport_unchanged_types!(
     TimeBasedUpgrade,
     ViewBasedUpgrade,
     BlockSize,
-    PublicHotShotConfig,
-    PublicNetworkConfig,
-    PublicValidatorConfig
 );
 
 pub(crate) use v0_3::{L1ClientMetrics, L1Event, L1State, L1UpdateTask};
diff --git a/types/src/v0/v0_1/config.rs b/types/src/v0/v0_1/config.rs
deleted file mode 100644
index 4f52574f20..0000000000
--- a/types/src/v0/v0_1/config.rs
+++ /dev/null
@@ -1,262 +0,0 @@
-use std::{num::NonZeroUsize, time::Duration};
-
-use anyhow::Context;
-use vec1::Vec1;
-
-use crate::PubKey;
-use hotshot_types::network::{
-    BuilderType, CombinedNetworkConfig, Libp2pConfig, RandomBuilderConfig,
-};
-use hotshot_types::{network::NetworkConfig, HotShotConfig, PeerConfig, ValidatorConfig};
-use serde::{Deserialize, Serialize};
-use tide_disco::Url;
-
-/// This struct defines the public Hotshot validator configuration.
-/// Private key and state key pairs are excluded for security reasons.
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct PublicValidatorConfig {
-    public_key: PubKey,
-    stake_value: u64,
-    is_da: bool,
-    private_key: String,
-    state_public_key: String,
-    state_key_pair: String,
-}
-
-impl From<ValidatorConfig<PubKey>> for PublicValidatorConfig {
-    fn from(v: ValidatorConfig<PubKey>) -> Self {
-        let ValidatorConfig::<PubKey> {
-            public_key,
-            private_key: _,
-            stake_value,
-            state_key_pair,
-            is_da,
-        } = v;
-
-        let state_public_key = state_key_pair.ver_key();
-
-        Self {
-            public_key,
-            stake_value,
-            is_da,
-            state_public_key: state_public_key.to_string(),
-            private_key: "*****".into(),
-            state_key_pair: "*****".into(),
-        }
-    }
-}
-
-/// This struct defines the public Hotshot configuration parameters.
-/// Our config module features a GET endpoint accessible via the route `/hotshot` to display the hotshot config parameters.
-/// Hotshot config has sensitive information like private keys and such fields are excluded from this struct.
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct PublicHotShotConfig {
-    start_threshold: (u64, u64),
-    num_nodes_with_stake: NonZeroUsize,
-    known_nodes_with_stake: Vec<PeerConfig<PubKey>>,
-    known_da_nodes: Vec<PeerConfig<PubKey>>,
-    da_staked_committee_size: usize,
-    fixed_leader_for_gpuvid: usize,
-    next_view_timeout: u64,
-    view_sync_timeout: Duration,
-    num_bootstrap: usize,
-    builder_timeout: Duration,
-    data_request_delay: Duration,
-    builder_urls: Vec1<Url>,
-    start_proposing_view: u64,
-    stop_proposing_view: u64,
-    start_voting_view: u64,
-    stop_voting_view: u64,
-    start_proposing_time: u64,
-    stop_proposing_time: u64,
-    start_voting_time: u64,
-    stop_voting_time: u64,
-    epoch_height: u64,
-}
-
-impl From<HotShotConfig<PubKey>> for PublicHotShotConfig {
-    fn from(v: HotShotConfig<PubKey>) -> Self {
-        // Destructure all fields from HotShotConfig to return an error
-        // if new fields are added to HotShotConfig. This makes sure that we handle
-        // all fields appropriately and do not miss any updates.
-        let HotShotConfig::<PubKey> {
-            start_threshold,
-            num_nodes_with_stake,
-            known_nodes_with_stake,
-            known_da_nodes,
-            da_staked_committee_size,
-            fixed_leader_for_gpuvid,
-            next_view_timeout,
-            view_sync_timeout,
-            num_bootstrap,
-            builder_timeout,
-            data_request_delay,
-            builder_urls,
-            start_proposing_view,
-            stop_proposing_view,
-            start_voting_view,
-            stop_voting_view,
-            start_proposing_time,
-            stop_proposing_time,
-            start_voting_time,
-            stop_voting_time,
-            epoch_height,
-        } = v;
-
-        Self {
-            start_threshold,
-            num_nodes_with_stake,
-            known_nodes_with_stake,
-            known_da_nodes,
-            da_staked_committee_size,
-            fixed_leader_for_gpuvid,
-            next_view_timeout,
-            view_sync_timeout,
-            num_bootstrap,
-            builder_timeout,
-            data_request_delay,
-            builder_urls,
-            start_proposing_view,
-            stop_proposing_view,
-            start_voting_view,
-            stop_voting_view,
-            start_proposing_time,
-            stop_proposing_time,
-            start_voting_time,
-            stop_voting_time,
-            epoch_height,
-        }
-    }
-}
-
-impl PublicHotShotConfig {
-    pub fn into_hotshot_config(self) -> HotShotConfig<PubKey> {
-        HotShotConfig {
-            start_threshold: self.start_threshold,
-            num_nodes_with_stake: self.num_nodes_with_stake,
-            known_nodes_with_stake: self.known_nodes_with_stake,
-            known_da_nodes: self.known_da_nodes,
-            da_staked_committee_size: self.da_staked_committee_size,
-            fixed_leader_for_gpuvid: self.fixed_leader_for_gpuvid,
-            next_view_timeout: self.next_view_timeout,
-            view_sync_timeout: self.view_sync_timeout,
-            num_bootstrap: self.num_bootstrap,
-            builder_timeout: self.builder_timeout,
-            data_request_delay: self.data_request_delay,
-            builder_urls: self.builder_urls,
-            start_proposing_view: self.start_proposing_view,
-            stop_proposing_view: self.stop_proposing_view,
-            start_voting_view: self.start_voting_view,
-            stop_voting_view: self.stop_voting_view,
-            start_proposing_time: self.start_proposing_time,
-            stop_proposing_time: self.stop_proposing_time,
-            start_voting_time: self.start_voting_time,
-            stop_voting_time: self.stop_voting_time,
-            epoch_height: self.epoch_height,
-        }
-    }
-
-    pub fn known_nodes_with_stake(&self) -> Vec<PeerConfig<PubKey>> {
-        self.known_nodes_with_stake.clone()
-    }
-
-    pub fn known_da_nodes(&self) -> Vec<PeerConfig<PubKey>> {
-        self.known_da_nodes.clone()
-    }
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct PublicNetworkConfig {
-    rounds: usize,
-    indexed_da: bool,
-    transactions_per_round: usize,
-    manual_start_password: Option<String>,
-    num_bootrap: usize,
-    next_view_timeout: u64,
-    view_sync_timeout: Duration,
-    builder_timeout: Duration,
-    data_request_delay: Duration,
-    node_index: u64,
-    seed: [u8; 32],
-    transaction_size: usize,
-    key_type_name: String,
-    libp2p_config: Option<Libp2pConfig>,
-    config: PublicHotShotConfig,
-    cdn_marshal_address: Option<String>,
-    combined_network_config: Option<CombinedNetworkConfig>,
-    commit_sha: String,
-    builder: BuilderType,
-    random_builder: Option<RandomBuilderConfig>,
-}
-
-impl From<NetworkConfig<PubKey>> for PublicNetworkConfig {
-    fn from(cfg: NetworkConfig<PubKey>) -> Self {
-        Self {
-            rounds: cfg.rounds,
-            indexed_da: cfg.indexed_da,
-            transactions_per_round: cfg.transactions_per_round,
-            manual_start_password: Some("*****".into()),
-            num_bootrap: cfg.num_bootrap,
-            next_view_timeout: cfg.next_view_timeout,
-            view_sync_timeout: cfg.view_sync_timeout,
-            builder_timeout: cfg.builder_timeout,
-            data_request_delay: cfg.data_request_delay,
-            node_index: cfg.node_index,
-            seed: cfg.seed,
-            transaction_size: cfg.transaction_size,
-            key_type_name: cfg.key_type_name,
-            libp2p_config: cfg.libp2p_config,
-            config: cfg.config.into(),
-            cdn_marshal_address: cfg.cdn_marshal_address,
-            combined_network_config: cfg.combined_network_config,
-            commit_sha: cfg.commit_sha,
-            builder: cfg.builder,
-            random_builder: cfg.random_builder,
-        }
-    }
-}
-
-impl PublicNetworkConfig {
-    pub fn into_network_config(
-        self,
-        my_own_validator_config: ValidatorConfig<PubKey>,
-    ) -> anyhow::Result<NetworkConfig<PubKey>> {
-        let node_index = self
-            .config
-            .known_nodes_with_stake
-            .iter()
-            .position(|peer| peer.stake_table_entry.stake_key == my_own_validator_config.public_key)
-            .context(format!(
-                "the node {} is not in the stake table",
-                my_own_validator_config.public_key
-            ))? as u64;
-
-        Ok(NetworkConfig {
-            rounds: self.rounds,
-            indexed_da: self.indexed_da,
-            transactions_per_round: self.transactions_per_round,
-            manual_start_password: self.manual_start_password,
-            num_bootrap: self.num_bootrap,
-            next_view_timeout: self.next_view_timeout,
-            view_sync_timeout: self.view_sync_timeout,
-            builder_timeout: self.builder_timeout,
-            data_request_delay: self.data_request_delay,
-            node_index,
-            seed: self.seed,
-            transaction_size: self.transaction_size,
-            key_type_name: self.key_type_name,
-            libp2p_config: self.libp2p_config,
-            config: self.config.into_hotshot_config(),
-            cdn_marshal_address: self.cdn_marshal_address,
-            combined_network_config: self.combined_network_config,
-            commit_sha: self.commit_sha,
-            builder: self.builder,
-            random_builder: self.random_builder,
-            public_keys: Vec::new(),
-        })
-    }
-
-    pub fn hotshot_config(&self) -> PublicHotShotConfig {
-        self.config.clone()
-    }
-}
diff --git a/types/src/v0/v0_1/mod.rs b/types/src/v0/v0_1/mod.rs
index 494659e1be..7115342c14 100644
--- a/types/src/v0/v0_1/mod.rs
+++ b/types/src/v0/v0_1/mod.rs
@@ -4,7 +4,6 @@ pub const VERSION: Version = Version { major: 0, minor: 1 };
 
 mod block;
 mod chain_config;
-mod config;
 mod fee_info;
 mod header;
 mod instance_state;
@@ -15,7 +14,6 @@ mod transaction;
 
 pub use block::*;
 pub use chain_config::*;
-pub use config::*;
 pub use fee_info::*;
 pub use header::Header;
 pub use instance_state::*;
diff --git a/types/src/v0/v0_2/mod.rs b/types/src/v0/v0_2/mod.rs
index 5d8acb79f4..ebbfdcce98 100644
--- a/types/src/v0/v0_2/mod.rs
+++ b/types/src/v0/v0_2/mod.rs
@@ -8,11 +8,10 @@ pub use super::v0_1::{
     L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder,
     NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder,
     NsTableValidationError, NumNss, NumTxs, NumTxsRange, NumTxsUnchecked, Payload, PayloadByteLen,
-    PublicHotShotConfig, PublicNetworkConfig, PublicValidatorConfig, ResolvableChainConfig,
-    TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, TxPayloadRange, TxProof,
-    TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade,
-    BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN,
-    NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
+    ResolvableChainConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload,
+    TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode,
+    UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT,
+    NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN,
 };
 
 pub const VERSION: Version = Version { major: 0, minor: 2 };
diff --git a/types/src/v0/v0_3/header.rs b/types/src/v0/v0_3/header.rs
index c4dd120916..3d0f8df43e 100644
--- a/types/src/v0/v0_3/header.rs
+++ b/types/src/v0/v0_3/header.rs
@@ -6,7 +6,7 @@ use super::{
 };
 use ark_serialize::CanonicalSerialize;
 use committable::{Commitment, Committable,
RawCommitmentBuilder}; -use hotshot_types::{utils::BuilderCommitment, vid::VidCommitment}; +use hotshot_types::{data::VidCommitment, utils::BuilderCommitment}; use serde::{Deserialize, Serialize}; /// A header is like a [`Block`] with the body replaced by a digest. @@ -45,7 +45,7 @@ impl Committable for Header { .u64_field("l1_head", self.l1_head) .optional("l1_finalized", &self.l1_finalized) .constant_str("payload_commitment") - .fixed_size_bytes(self.payload_commitment.as_ref().as_ref()) + .fixed_size_bytes(self.payload_commitment.as_ref()) .constant_str("builder_commitment") .fixed_size_bytes(self.builder_commitment.as_ref()) .field("ns_table", self.ns_table.commit()) diff --git a/types/src/v0/v0_3/mod.rs b/types/src/v0/v0_3/mod.rs index b96fb014a9..ec59b858b4 100644 --- a/types/src/v0/v0_3/mod.rs +++ b/types/src/v0/v0_3/mod.rs @@ -7,9 +7,8 @@ pub use super::v0_1::{ FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, - NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig, - PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, - TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter, + TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; diff --git a/types/src/v0/v0_99/mod.rs b/types/src/v0/v0_99/mod.rs index f45ce0bd5d..3e676a4b2a 100644 --- a/types/src/v0/v0_99/mod.rs +++ b/types/src/v0/v0_99/mod.rs @@ -7,9 +7,8 @@ pub use super::v0_1::{ FeeMerkleTree, Index, Iter, L1BlockInfo, L1Client, L1ClientOptions, L1Snapshot, NamespaceId, NsIndex, NsIter, NsPayload, NsPayloadBuilder, NsPayloadByteLen, NsPayloadOwned, NsPayloadRange, NsProof, NsTable, NsTableBuilder, NsTableValidationError, NumNss, NumTxs, NumTxsRange, - NumTxsUnchecked, Payload, PayloadByteLen, PublicHotShotConfig, PublicNetworkConfig, - PublicValidatorConfig, TimeBasedUpgrade, Transaction, TxIndex, TxIter, TxPayload, - TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, + NumTxsUnchecked, Payload, PayloadByteLen, TimeBasedUpgrade, Transaction, TxIndex, TxIter, + TxPayload, TxPayloadRange, TxProof, TxTableEntries, TxTableEntriesRange, Upgrade, UpgradeMode, UpgradeType, ViewBasedUpgrade, BLOCK_MERKLE_TREE_HEIGHT, FEE_MERKLE_TREE_HEIGHT, NS_ID_BYTE_LEN, NS_OFFSET_BYTE_LEN, NUM_NSS_BYTE_LEN, NUM_TXS_BYTE_LEN, TX_OFFSET_BYTE_LEN, }; From 850d2de0f5bd138c28d13b9882f2767090c19d2b Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Mar 2025 03:57:52 +0500 Subject: [PATCH 108/120] remove duplicate migration --- .../migrations/postgres/V500__leaf2_migration.sql | 15 --------------- .../migrations/sqlite/V300__leaf2_migration.sql | 15 --------------- 2 files changed, 30 deletions(-) delete mode 100644 hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql delete mode 100644 hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql diff --git a/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql 
b/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql deleted file mode 100644 index 69f562df7b..0000000000 --- a/hotshot-query-service/migrations/postgres/V500__leaf2_migration.sql +++ /dev/null @@ -1,15 +0,0 @@ -CREATE TABLE leaf2 -( - height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, - hash VARCHAR NOT NULL UNIQUE, - block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, - leaf JSONB NOT NULL, - qc JSONB NOT NULL -); - -CREATE TABLE leaf_migration ( - id SERIAL PRIMARY KEY, - completed bool NOT NULL DEFAULT false -); - -INSERT INTO leaf_migration ("completed") VALUES (false); \ No newline at end of file diff --git a/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql b/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql deleted file mode 100644 index ce22030d5b..0000000000 --- a/hotshot-query-service/migrations/sqlite/V300__leaf2_migration.sql +++ /dev/null @@ -1,15 +0,0 @@ -CREATE TABLE leaf2 -( - height BIGINT PRIMARY KEY REFERENCES header (height) ON DELETE CASCADE, - hash VARCHAR NOT NULL UNIQUE, - block_hash VARCHAR NOT NULL REFERENCES header (hash) ON DELETE CASCADE, - leaf JSONB NOT NULL, - qc JSONB NOT NULL -); - -CREATE TABLE leaf_migration ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - completed bool NOT NULL DEFAULT false -); - -INSERT INTO leaf_migration ("completed") VALUES (false); \ No newline at end of file From 85756d894c4330748c557191754eb60caabb9e5f Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Fri, 7 Mar 2025 17:17:11 +0500 Subject: [PATCH 109/120] fix genesis_vid and parse_ns_table() --- .../src/data_source/update.rs | 62 ++++++++++++++----- hotshot-types/src/data/ns_table.rs | 7 ++- 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 0f2da97126..f78d1c28f0 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -22,16 +22,19 @@ use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::data::{VidDisperseShare, VidShare}; +use hotshot_types::{data::VidCommitment, event::LeafInfo}; use hotshot_types::{ - data::Leaf2, + data::{ns_table::parse_ns_table, Leaf2}, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, vid::advz::advz_scheme, }; -use hotshot_types::{data::VidCommitment, event::LeafInfo}; +use hotshot_types::{ + data::{VidDisperseShare, VidShare}, + vid::avidm::{init_avidm_param, AvidMScheme}, +}; use jf_vid::VidScheme; use std::iter::once; @@ -168,19 +171,46 @@ fn genesis_vid( ) -> anyhow::Result<(VidCommonQueryData, VidShare)> { let payload = Payload::::empty().0; let bytes = payload.encode(); - let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) - .disperse(bytes) - .context("unable to compute VID dispersal for genesis block")?; - ensure!( - VidCommitment::V0(disperse.commit) == leaf.block_header().payload_commitment(), - "computed VID commit {} for genesis block does not match header commit {}", - disperse.commit, - leaf.block_header().payload_commitment() - ); - Ok(( - VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), - VidShare::V0(disperse.shares.remove(0)), - )) + + match leaf.block_header().payload_commitment() { + VidCommitment::V0(commit) => { + let mut disperse = 
advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) + .disperse(bytes) + .context("unable to compute VID dispersal for genesis block")?; + + ensure!( + disperse.commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + disperse.commit, + commit + ); + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), + VidShare::V0(disperse.shares.remove(0)), + )) + } + VidCommitment::V1(commit) => { + let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; + let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; + tracing::error!(">>>0"); + let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode()); + tracing::error!(">>>1"); + let (calculated_commit, mut shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap(); + + ensure!( + calculated_commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + calculated_commit, + commit + ); + + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), None), + VidShare::V1(shares.remove(0)), + )) + } + } } /// A data source with an atomic transaction-based synchronization interface. diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs index c645c36957..fac0792dd6 100644 --- a/hotshot-types/src/data/ns_table.rs +++ b/hotshot-types/src/data/ns_table.rs @@ -26,9 +26,10 @@ pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec return vec![(0..payload_byte_len)]; } let num_entries = u32::from_le_bytes(bytes[..NUM_NSS_BYTE_LEN].try_into().unwrap()) as usize; - if num_entries - != bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) - / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN) + if num_entries == 0 + || num_entries + != bytes.len().saturating_sub(NUM_NSS_BYTE_LEN) + / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN) { tracing::warn!("Failed to parse the metadata as namespace table. 
         return vec![(0..payload_byte_len)];

From 9de78eaba043ba5429ff496a673053b65f538d3c Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Fri, 7 Mar 2025 19:18:33 +0500
Subject: [PATCH 110/120] increase epoch height to 30

---
 data/genesis/demo-pos-base.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data/genesis/demo-pos-base.toml b/data/genesis/demo-pos-base.toml
index d909148ef6..3fc79f79d2 100644
--- a/data/genesis/demo-pos-base.toml
+++ b/data/genesis/demo-pos-base.toml
@@ -1,6 +1,6 @@
 base_version = "0.3"
 upgrade_version = "0.3"
-epoch_height = 10
+epoch_height = 30
 
 [stake_table]
 capacity = 10

From 6a745cea29da3c4c269ef17e4e775911e8a4dc6e Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Fri, 7 Mar 2025 19:42:44 +0500
Subject: [PATCH 111/120] do not store stake table for e+1, e+2 when updating
 epoch committees

Store only the committee for the epoch actually being updated, and
shrink the genesis workaround map to epochs 1 and 2. Also remove the
namespace-table parse warning.

---
 data/genesis/demo-pos.toml         | 2 +-
 hotshot-types/src/data/ns_table.rs | 1 -
 types/src/v0/impls/stake_table.rs  | 9 ++-------
 3 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/data/genesis/demo-pos.toml b/data/genesis/demo-pos.toml
index 6414cab0d2..8b4f23266c 100644
--- a/data/genesis/demo-pos.toml
+++ b/data/genesis/demo-pos.toml
@@ -1,6 +1,6 @@
 base_version = "0.2"
 upgrade_version = "0.3"
-epoch_height = 10
+epoch_height = 30
 
 [stake_table]
 capacity = 10

diff --git a/hotshot-types/src/data/ns_table.rs b/hotshot-types/src/data/ns_table.rs
index fac0792dd6..af9b1072d0 100644
--- a/hotshot-types/src/data/ns_table.rs
+++ b/hotshot-types/src/data/ns_table.rs
@@ -31,7 +31,6 @@ pub fn parse_ns_table(payload_byte_len: usize, bytes: &[u8]) -> Vec<Range<usize>> {
             != bytes.len().saturating_sub(NUM_NSS_BYTE_LEN)
                 / NS_ID_BYTE_LEN.saturating_add(NS_OFFSET_BYTE_LEN)
     {
-        tracing::warn!("Failed to parse the metadata as namespace table. Use a single namespace table instead.");
         return vec![(0..payload_byte_len)];
     }
     let mut l = 0;

diff --git a/types/src/v0/impls/stake_table.rs b/types/src/v0/impls/stake_table.rs
index b1c4e3440f..769e5380c7 100644
--- a/types/src/v0/impls/stake_table.rs
+++ b/types/src/v0/impls/stake_table.rs
@@ -224,14 +224,9 @@ impl EpochCommittees {
         };
 
         self.state.insert(epoch, committee.clone());
-        self.state.insert(epoch + 1, committee.clone());
-        self.state.insert(epoch + 2, committee.clone());
+
         self.randomized_committees
             .insert(epoch, randomized_committee.clone());
-        self.randomized_committees
-            .insert(epoch + 1, randomized_committee.clone());
-        self.randomized_committees
-            .insert(epoch + 2, randomized_committee.clone());
     }
 
     // We need a constructor to match our concrete type.
@@ -306,7 +301,7 @@ impl EpochCommittees { // TODO: remove this, workaround for hotshot asking for stake tables from epoch 1 and 2 let mut map = HashMap::new(); - for epoch in Epoch::genesis().u64()..=50 { + for epoch in Epoch::genesis().u64()..=2 { map.insert(Epoch::new(epoch), members.clone()); randomized_committees.insert(Epoch::new(epoch), randomized_committee.clone()); } From c1a8f26998332f548b465c19b88a06a6a5c935aa Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Mar 2025 10:28:52 +0500 Subject: [PATCH 112/120] fmt --- builder/src/lib.rs | 8 +- client/src/lib.rs | 2 +- contract-bindings-alloy/src/erc1967proxy.rs | 38 +- contract-bindings-alloy/src/feecontract.rs | 252 +++++++------- contract-bindings-alloy/src/iplonkverifier.rs | 14 +- contract-bindings-alloy/src/lightclient.rs | 296 ++++++++-------- .../src/lightclientarbitrum.rs | 294 ++++++++-------- .../src/lightclientmock.rs | 328 +++++++++--------- .../src/permissionedstaketable.rs | 142 ++++---- contract-bindings-alloy/src/plonkverifier.rs | 36 +- contract-bindings-alloy/src/plonkverifier2.rs | 38 +- contract-bindings-ethers/src/erc1967_proxy.rs | 12 +- contract-bindings-ethers/src/fee_contract.rs | 52 +-- contract-bindings-ethers/src/light_client.rs | 64 ++-- .../src/light_client_arbitrum.rs | 64 ++-- .../src/light_client_mock.rs | 66 ++-- .../src/permissioned_stake_table.rs | 22 +- .../src/plonk_verifier.rs | 4 +- contracts/rust/diff-test/src/main.rs | 42 +-- hotshot-builder-api/src/api.rs | 2 +- .../src/service.rs | 26 +- hotshot-builder-core/src/builder_state.rs | 8 +- hotshot-builder-core/src/service.rs | 238 ++++++------- .../src/testing/basic_test.rs | 2 +- hotshot-builder-core/src/testing/mod.rs | 4 +- hotshot-events-service/src/api.rs | 2 +- hotshot-events-service/src/events_source.rs | 2 +- hotshot-example-types/src/testable_delay.rs | 8 +- hotshot-examples/infra/mod.rs | 22 +- hotshot-fakeapi/src/fake_solver.rs | 4 +- .../src/network/behaviours/dht/bootstrap.rs | 16 +- .../src/network/behaviours/dht/mod.rs | 40 +-- .../behaviours/dht/store/persistent.rs | 8 +- .../src/network/behaviours/direct_message.rs | 10 +- hotshot-libp2p-networking/src/network/cbor.rs | 10 +- hotshot-libp2p-networking/src/network/node.rs | 80 ++--- .../src/network/transport.rs | 4 +- hotshot-macros/src/lib.rs | 8 +- hotshot-orchestrator/src/client.rs | 2 +- hotshot-query-service/src/api.rs | 2 +- hotshot-query-service/src/availability.rs | 8 +- .../src/data_source/fetching.rs | 42 +-- .../src/data_source/fetching/header.rs | 16 +- .../src/data_source/fetching/leaf.rs | 18 +- .../src/data_source/storage/fail_storage.rs | 4 +- .../src/data_source/storage/fs.rs | 14 +- .../src/data_source/storage/sql.rs | 6 +- .../src/data_source/storage/sql/queries.rs | 12 +- .../storage/sql/queries/explorer.rs | 14 +- .../data_source/storage/sql/queries/node.rs | 12 +- .../data_source/storage/sql/queries/state.rs | 14 +- .../data_source/storage/sql/transaction.rs | 12 +- .../src/data_source/update.rs | 12 +- hotshot-query-service/src/explorer.rs | 4 +- hotshot-query-service/src/explorer/errors.rs | 2 +- .../src/explorer/monetary_value.rs | 10 +- .../src/explorer/query_data.rs | 2 +- hotshot-query-service/src/fetching.rs | 4 +- .../src/fetching/provider/any.rs | 2 +- .../src/fetching/provider/query_service.rs | 28 +- hotshot-stake-table/src/mt_based.rs | 8 +- hotshot-stake-table/src/mt_based/internal.rs | 18 +- hotshot-stake-table/src/vec_based.rs | 4 +- hotshot-stake-table/src/vec_based/config.rs | 2 +- hotshot-state-prover/src/service.rs | 12 +- 
hotshot-task-impls/src/builder.rs | 4 +- hotshot-task-impls/src/consensus/mod.rs | 14 +- hotshot-task-impls/src/da.rs | 12 +- hotshot-task-impls/src/events.rs | 94 ++--- hotshot-task-impls/src/helpers.rs | 8 +- hotshot-task-impls/src/network.rs | 150 ++++---- .../src/quorum_proposal/handlers.rs | 12 +- hotshot-task-impls/src/quorum_proposal/mod.rs | 52 +-- .../src/quorum_proposal_recv/handlers.rs | 2 +- .../src/quorum_proposal_recv/mod.rs | 8 +- .../src/quorum_vote/handlers.rs | 10 +- hotshot-task-impls/src/quorum_vote/mod.rs | 30 +- hotshot-task-impls/src/request.rs | 6 +- hotshot-task-impls/src/response.rs | 12 +- hotshot-task-impls/src/rewind.rs | 2 +- hotshot-task-impls/src/transactions.rs | 42 +-- hotshot-task-impls/src/upgrade.rs | 8 +- hotshot-task-impls/src/vid.rs | 10 +- hotshot-task-impls/src/view_sync.rs | 36 +- hotshot-task-impls/src/vote_collection.rs | 12 +- hotshot-task/src/dependency.rs | 6 +- hotshot-task/src/task.rs | 6 +- hotshot-testing/src/block_builder/mod.rs | 12 +- hotshot-testing/src/block_builder/random.rs | 10 +- hotshot-testing/src/block_builder/simple.rs | 16 +- .../src/byzantine/byzantine_behaviour.rs | 18 +- hotshot-testing/src/consistency_task.rs | 12 +- hotshot-testing/src/predicates/event.rs | 2 +- hotshot-testing/src/script.rs | 2 +- hotshot-testing/src/spinning_task.rs | 16 +- hotshot-testing/src/test_builder.rs | 6 +- hotshot-testing/src/test_runner.rs | 10 +- hotshot-testing/src/test_task.rs | 10 +- hotshot-testing/src/txn_task.rs | 4 +- hotshot-testing/src/view_generator.rs | 2 +- hotshot-testing/src/view_sync_task.rs | 4 +- hotshot-testing/tests/tests_6/test_epochs.rs | 14 +- hotshot-types/src/consensus.rs | 2 +- hotshot-types/src/data.rs | 24 +- hotshot-types/src/lib.rs | 4 +- hotshot-types/src/message.rs | 62 ++-- hotshot-types/src/network.rs | 4 +- hotshot-types/src/traits/storage.rs | 4 +- hotshot-types/src/vote.rs | 2 +- hotshot-utils/src/anytrace.rs | 26 +- hotshot-utils/src/anytrace/macros.rs | 12 +- hotshot/src/lib.rs | 8 +- hotshot/src/tasks/mod.rs | 6 +- .../src/traits/networking/combined_network.rs | 8 +- .../src/traits/networking/libp2p_network.rs | 22 +- .../src/traits/networking/memory_network.rs | 10 +- .../src/traits/networking/push_cdn_network.rs | 2 +- hotshot/src/types/handle.rs | 8 +- marketplace-builder-core/src/service.rs | 16 +- .../src/testing/order_test.rs | 8 +- .../src/coordinator/mod.rs | 14 +- .../src/coordinator/tiered_view_map.rs | 4 +- marketplace-builder-shared/src/error.rs | 6 +- marketplace-builder-shared/src/state.rs | 6 +- .../src/testing/consensus.rs | 4 +- .../src/testing/generation.rs | 14 +- .../src/utils/event_service_wrapper.rs | 16 +- marketplace-builder/src/builder.rs | 8 +- marketplace-builder/src/hooks.rs | 10 +- marketplace-solver/src/api.rs | 2 +- marketplace-solver/src/database.rs | 2 +- marketplace-solver/src/events.rs | 2 +- marketplace-solver/src/testing.rs | 4 +- .../src/api/node_validator/v0/cdn/mod.rs | 20 +- .../v0/create_node_validator_api.rs | 26 +- node-metrics/src/api/node_validator/v0/mod.rs | 24 +- node-metrics/src/lib.rs | 8 +- .../src/service/client_message/mod.rs | 2 +- node-metrics/src/service/client_state/mod.rs | 38 +- node-metrics/src/service/data_state/mod.rs | 10 +- request-response/src/lib.rs | 10 +- request-response/src/message.rs | 14 +- sequencer/src/api.rs | 14 +- sequencer/src/api/sql.rs | 10 +- sequencer/src/bin/espresso-bridge.rs | 2 +- sequencer/src/bin/keygen.rs | 6 +- sequencer/src/bin/nasty-client.rs | 34 +- sequencer/src/bin/pub-key.rs | 6 +- 
sequencer/src/bin/reset-storage.rs | 4 +- sequencer/src/bin/submit-transactions.rs | 2 +- .../bin/update-permissioned-stake-table.rs | 8 +- sequencer/src/bin/utils/keygen.rs | 6 +- sequencer/src/bin/utils/main.rs | 2 +- sequencer/src/bin/utils/pubkey.rs | 6 +- sequencer/src/bin/utils/reset_storage.rs | 6 +- sequencer/src/bin/verify-headers.rs | 6 +- sequencer/src/catchup.rs | 8 +- sequencer/src/external_event_handler.rs | 10 +- sequencer/src/genesis.rs | 8 +- sequencer/src/lib.rs | 8 +- sequencer/src/options.rs | 14 +- sequencer/src/persistence/fs.rs | 6 +- sequencer/src/persistence/sql.rs | 20 +- sequencer/src/proposal_fetcher.rs | 4 +- .../src/request_response/recipient_source.rs | 2 +- sequencer/src/restart_tests.rs | 2 +- sequencer/src/run.rs | 4 +- sequencer/src/state.rs | 4 +- sequencer/src/state_signature.rs | 4 +- sequencer/src/state_signature/relay_server.rs | 6 +- .../v0/impls/block/full_payload/ns_proof.rs | 10 +- .../v0/impls/block/full_payload/payload.rs | 2 +- .../impls/block/namespace_payload/tx_proof.rs | 8 +- types/src/v0/impls/fee_info.rs | 14 +- types/src/v0/impls/header.rs | 6 +- types/src/v0/impls/instance_state.rs | 4 +- types/src/v0/impls/l1.rs | 10 +- types/src/v0/impls/solver.rs | 2 +- types/src/v0/impls/state.rs | 4 +- types/src/v0/traits.rs | 16 +- types/src/v0/utils.rs | 4 +- utils/src/lib.rs | 20 +- 182 files changed, 2004 insertions(+), 2006 deletions(-) diff --git a/builder/src/lib.rs b/builder/src/lib.rs index f1e22d9f30..63d80958e4 100755 --- a/builder/src/lib.rs +++ b/builder/src/lib.rs @@ -414,10 +414,10 @@ pub mod testing { { Ok(response) => { tracing::info!("Received txn submitted response : {:?}", response); - } + }, Err(e) => { panic!("Error submitting private transaction {:?}", e); - } + }, } let seed = [207_u8; 32]; @@ -514,10 +514,10 @@ pub mod testing { Ok(response) => { tracing::info!("Received Builder Key : {:?}", response); assert_eq!(response, builder_pub_key); - } + }, Err(e) => { panic!("Error getting builder key {:?}", e); - } + }, } } } diff --git a/client/src/lib.rs b/client/src/lib.rs index aecf930e70..bb7c9a252a 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -113,7 +113,7 @@ impl SequencerClient { } else { sleep(Duration::from_millis(200)).await; } - } + }, } }; diff --git a/contract-bindings-alloy/src/erc1967proxy.rs b/contract-bindings-alloy/src/erc1967proxy.rs index 2f9998b31e..b6531e512e 100644 --- a/contract-bindings-alloy/src/erc1967proxy.rs +++ b/contract-bindings-alloy/src/erc1967proxy.rs @@ -143,7 +143,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -209,7 +209,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -275,7 +275,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -335,7 +335,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -501,7 +501,7 @@ pub mod ERC1967Proxy { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -580,16 +580,16 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, 
Self::FailedInnerCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -674,18 +674,18 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -693,18 +693,18 @@ pub mod ERC1967Proxy { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -739,7 +739,7 @@ pub mod ERC1967Proxy { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -763,7 +763,7 @@ pub mod ERC1967Proxy { match self { Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/feecontract.rs b/contract-bindings-alloy/src/feecontract.rs index 0eedfa5680..8fb181188b 100644 --- a/contract-bindings-alloy/src/feecontract.rs +++ b/contract-bindings-alloy/src/feecontract.rs @@ -494,7 +494,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -558,7 +558,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -618,7 +618,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -680,7 +680,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -746,7 +746,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -806,7 +806,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -866,7 +866,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -926,7 +926,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -986,7 +986,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1046,7 +1046,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1106,7 +1106,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1168,7 +1168,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1234,7 +1234,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) 
=> {}, } } #[automatically_derived] @@ -1298,7 +1298,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1360,7 +1360,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2050,7 +2050,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2116,7 +2116,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2145,7 +2145,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2228,7 +2228,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2257,7 +2257,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2342,7 +2342,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2371,7 +2371,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2458,7 +2458,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2491,7 +2491,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2580,7 +2580,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2609,7 +2609,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2694,7 +2694,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2723,7 +2723,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2804,7 +2804,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2833,7 +2833,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2914,7 +2914,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2943,7 +2943,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3024,7 +3024,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3053,7 +3053,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3132,7 +3132,7 @@ pub mod FeeContract { match _t 
{ alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3161,7 +3161,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3242,7 +3242,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3271,7 +3271,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3363,7 +3363,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3395,7 +3395,7 @@ pub mod FeeContract { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3502,28 +3502,28 @@ pub mod FeeContract { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::balances(_) => ::SELECTOR, Self::deposit(_) => ::SELECTOR, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::maxDepositAmount(_) => { ::SELECTOR - } + }, Self::minDepositAmount(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3702,40 +3702,40 @@ pub mod FeeContract { ::abi_encoded_size( inner, ) - } + }, Self::balances(inner) => { ::abi_encoded_size(inner) - } + }, Self::deposit(inner) => { ::abi_encoded_size(inner) - } + }, Self::getVersion(inner) => { ::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::maxDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::minDepositAmount(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::proxiableUUID(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3745,40 +3745,40 @@ pub mod FeeContract { ::abi_encode_raw( inner, out, ) - } + }, Self::balances(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::deposit(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::maxDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::minDepositAmount(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3836,49 +3836,49 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::DepositTooLarge(_) => { ::SELECTOR - } + }, Self::DepositTooSmall(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, 
Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::FunctionDoesNotExist(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidUserAddress(_) => { ::SELECTOR - } + }, Self::NoFunctionCalled(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, } } #[inline] @@ -4095,57 +4095,57 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooLarge(inner) => { ::abi_encoded_size(inner) - } + }, Self::DepositTooSmall(inner) => { ::abi_encoded_size(inner) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encoded_size(inner) - } + }, Self::FailedInnerCall(inner) => { ::abi_encoded_size(inner) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encoded_size(inner) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encoded_size( inner, ) - } + }, } } #[inline] @@ -4153,57 +4153,57 @@ pub mod FeeContract { match self { Self::AddressEmptyCode(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooLarge(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::DepositTooSmall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::ERC1967InvalidImplementation(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::ERC1967NonPayable(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FailedInnerCall(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::FunctionDoesNotExist(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::InvalidUserAddress(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NoFunctionCalled(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnauthorizedCallContext(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::UUPSUnsupportedProxiableUUID(inner) => { ::abi_encode_raw( inner, out, ) - } + }, } } } @@ -4270,31 +4270,31 @@ pub mod FeeContract { Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Deposit) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Log) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, 
Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -4314,11 +4314,11 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -4328,15 +4328,15 @@ pub mod FeeContract { Self::Deposit(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Log(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/iplonkverifier.rs b/contract-bindings-alloy/src/iplonkverifier.rs index d4af84cd19..6bb2d8068c 100644 --- a/contract-bindings-alloy/src/iplonkverifier.rs +++ b/contract-bindings-alloy/src/iplonkverifier.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1373,7 +1373,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1917,7 +1917,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2459,7 +2459,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2492,7 +2492,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2622,7 +2622,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -2630,7 +2630,7 @@ pub mod IPlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclient.rs b/contract-bindings-alloy/src/lightclient.rs index a2e27b0672..9594063b1e 100644 --- a/contract-bindings-alloy/src/lightclient.rs +++ b/contract-bindings-alloy/src/lightclient.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -658,7 +658,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2401,7 +2401,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2619,7 +2619,7 @@ pub mod LightClient { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2839,7 +2839,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2905,7 +2905,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2971,7 +2971,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3031,7 +3031,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3091,7 +3091,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3151,7 +3151,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3211,7 +3211,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3271,7 +3271,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3331,7 +3331,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3391,7 +3391,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3451,7 +3451,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3511,7 +3511,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3571,7 +3571,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3631,7 +3631,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3693,7 +3693,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3759,7 +3759,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3823,7 +3823,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3883,7 +3883,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3945,7 +3945,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4009,7 +4009,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4786,7 +4786,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -4852,7 +4852,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4881,7 +4881,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4962,7 +4962,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4991,7 +4991,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5070,7 +5070,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5099,7 +5099,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5182,7 +5182,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5219,7 +5219,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5311,7 +5311,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5350,7 +5350,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5448,7 +5448,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5485,7 +5485,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5577,7 +5577,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5612,7 +5612,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5700,7 +5700,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5729,7 +5729,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5812,7 +5812,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5845,7 +5845,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5947,7 +5947,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5986,7 +5986,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6083,7 +6083,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6112,7 +6112,7 @@ pub mod LightClient { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6202,7 +6202,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6234,7 +6234,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6329,7 +6329,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6361,7 +6361,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6445,7 +6445,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6474,7 +6474,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6555,7 +6555,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6584,7 +6584,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6665,7 +6665,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6694,7 +6694,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6773,7 +6773,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6802,7 +6802,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6883,7 +6883,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6912,7 +6912,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6997,7 +6997,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7028,7 +7028,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7118,7 +7118,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7157,7 +7157,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7257,7 +7257,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7286,7 +7286,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7367,7 +7367,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -7396,7 +7396,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7477,7 +7477,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7506,7 +7506,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7598,7 +7598,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7630,7 +7630,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7761,66 +7761,66 @@ pub mod LightClient { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8270,98 +8270,98 @@ pub mod LightClient { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, 
Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -8429,56 +8429,56 @@ pub mod LightClient { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9043,17 +9043,17 @@ pub mod LightClient { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9065,15 +9065,15 @@ pub mod LightClient { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -9092,17 +9092,17 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9111,23 +9111,23 @@ pub mod LightClient { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, 
Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientarbitrum.rs b/contract-bindings-alloy/src/lightclientarbitrum.rs index 1edab7451d..b679250de0 100644 --- a/contract-bindings-alloy/src/lightclientarbitrum.rs +++ b/contract-bindings-alloy/src/lightclientarbitrum.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -658,7 +658,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1294,7 +1294,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1512,7 +1512,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2983,7 +2983,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3049,7 +3049,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3115,7 +3115,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3175,7 +3175,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3235,7 +3235,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3295,7 +3295,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3355,7 +3355,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3415,7 +3415,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3475,7 +3475,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3535,7 +3535,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3595,7 +3595,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3655,7 +3655,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3715,7 +3715,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => 
{}, } } #[automatically_derived] @@ -3775,7 +3775,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3837,7 +3837,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3903,7 +3903,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3967,7 +3967,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4027,7 +4027,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4089,7 +4089,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4153,7 +4153,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4942,7 +4942,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4971,7 +4971,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5052,7 +5052,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5081,7 +5081,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5160,7 +5160,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5189,7 +5189,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5272,7 +5272,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5309,7 +5309,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5401,7 +5401,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5440,7 +5440,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5538,7 +5538,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5575,7 +5575,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5667,7 +5667,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5702,7 +5702,7 @@ pub mod LightClientArbitrum { match _t 
{ alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5790,7 +5790,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5819,7 +5819,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5902,7 +5902,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5935,7 +5935,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6038,7 +6038,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6077,7 +6077,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6176,7 +6176,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6205,7 +6205,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6295,7 +6295,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6327,7 +6327,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6423,7 +6423,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6455,7 +6455,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6541,7 +6541,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6570,7 +6570,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6651,7 +6651,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6680,7 +6680,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6761,7 +6761,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6790,7 +6790,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6869,7 +6869,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6898,7 +6898,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } 
#[automatically_derived] @@ -6979,7 +6979,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7008,7 +7008,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7093,7 +7093,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7124,7 +7124,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7214,7 +7214,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7253,7 +7253,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7353,7 +7353,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7382,7 +7382,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7463,7 +7463,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7492,7 +7492,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7573,7 +7573,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7602,7 +7602,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7694,7 +7694,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7726,7 +7726,7 @@ pub mod LightClientArbitrum { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7857,66 +7857,66 @@ pub mod LightClientArbitrum { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, 
Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -8363,98 +8363,98 @@ pub mod LightClientArbitrum { ::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -8522,56 +8522,56 @@ pub mod LightClientArbitrum { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9136,17 +9136,17 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) 
.map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -9158,15 +9158,15 @@ pub mod LightClientArbitrum { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -9185,17 +9185,17 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -9204,23 +9204,23 @@ pub mod LightClientArbitrum { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/lightclientmock.rs b/contract-bindings-alloy/src/lightclientmock.rs index 6d30b708d7..3aebe92346 100644 --- a/contract-bindings-alloy/src/lightclientmock.rs +++ b/contract-bindings-alloy/src/lightclientmock.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -658,7 +658,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1295,7 +1295,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1513,7 +1513,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1745,7 +1745,7 @@ pub mod LightClient { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3389,7 +3389,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3455,7 +3455,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3521,7 +3521,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3581,7 +3581,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3641,7 +3641,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3701,7 +3701,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3761,7 +3761,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3821,7 +3821,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3881,7 +3881,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3941,7 +3941,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4001,7 +4001,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4061,7 +4061,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4121,7 +4121,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4181,7 +4181,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4243,7 +4243,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4309,7 +4309,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4373,7 +4373,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4433,7 +4433,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4495,7 +4495,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -4559,7 +4559,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5349,7 +5349,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5437,7 +5437,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5466,7 +5466,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5547,7 +5547,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5576,7 +5576,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5655,7 +5655,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5684,7 +5684,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5767,7 +5767,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5804,7 +5804,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5896,7 +5896,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -5935,7 +5935,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6033,7 +6033,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6070,7 +6070,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6162,7 +6162,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6197,7 +6197,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6285,7 +6285,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6314,7 +6314,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6397,7 +6397,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6430,7 +6430,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6533,7 +6533,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6572,7 +6572,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6671,7 +6671,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6700,7 +6700,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6790,7 +6790,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6822,7 +6822,7 @@ pub mod LightClientMock { match _t { 
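// NOTE (comment-only, so the surrounding generated match stays intact):
// these recurring `match _t { AssertTypeEq::<...>(_) => {}, }` blocks are
// compile-time checks that the binding's hand-rolled Rust struct and the
// `RustType` associated type derived by the ABI machinery are the same
// type -- the single arm only type-checks when the two unify, and its body
// is dead code. The hunks in this stretch touch nothing but that arm's
// trailing comma.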
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6918,7 +6918,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -6950,7 +6950,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7036,7 +7036,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7065,7 +7065,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7146,7 +7146,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7175,7 +7175,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7256,7 +7256,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7285,7 +7285,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7364,7 +7364,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7393,7 +7393,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7475,7 +7475,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7504,7 +7504,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7589,7 +7589,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7618,7 +7618,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7701,7 +7701,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7730,7 +7730,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7811,7 +7811,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7840,7 +7840,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7932,7 +7932,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -7963,7 +7963,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8049,7 +8049,7 @@ pub mod LightClientMock { match _t { 
alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8080,7 +8080,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8170,7 +8170,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8209,7 +8209,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8309,7 +8309,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8338,7 +8338,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8419,7 +8419,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8448,7 +8448,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8529,7 +8529,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8558,7 +8558,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8650,7 +8650,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8682,7 +8682,7 @@ pub mod LightClientMock { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -8821,76 +8821,76 @@ pub mod LightClientMock { match self { Self::UPGRADE_INTERFACE_VERSION(_) => { ::SELECTOR - } + }, Self::currentBlockNumber(_) => { ::SELECTOR - } + }, Self::disablePermissionedProverMode(_) => { ::SELECTOR - } + }, Self::finalizedState(_) => { ::SELECTOR - } + }, Self::genesisStakeTableState(_) => { ::SELECTOR - } + }, Self::genesisState(_) => ::SELECTOR, Self::getHotShotCommitment(_) => { ::SELECTOR - } + }, Self::getStateHistoryCount(_) => { ::SELECTOR - } + }, Self::getVersion(_) => ::SELECTOR, Self::initialize(_) => ::SELECTOR, Self::isPermissionedProverEnabled(_) => { ::SELECTOR - } + }, Self::lagOverEscapeHatchThreshold(_) => { ::SELECTOR - } + }, Self::newFinalizedState(_) => { ::SELECTOR - } + }, Self::owner(_) => ::SELECTOR, Self::permissionedProver(_) => { ::SELECTOR - } + }, Self::proxiableUUID(_) => ::SELECTOR, Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::setFinalizedState(_) => { ::SELECTOR - } + }, Self::setHotShotDownSince(_) => { ::SELECTOR - } + }, Self::setHotShotUp(_) => ::SELECTOR, Self::setPermissionedProver(_) => { ::SELECTOR - } + }, Self::setStateHistory(_) => { ::SELECTOR - } + }, Self::setstateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::stateHistoryCommitments(_) => { ::SELECTOR - } + }, Self::stateHistoryFirstIndex(_) => { ::SELECTOR - } + }, Self::stateHistoryRetentionPeriod(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::upgradeToAndCall(_) => { ::SELECTOR - } + }, } } #[inline] @@ -9403,112 +9403,112 @@ pub mod LightClientMock { 
::abi_encode_raw( inner, out, ) - } + }, Self::currentBlockNumber(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::disablePermissionedProverMode(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::finalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::genesisStakeTableState(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::genesisState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::getHotShotCommitment(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getStateHistoryCount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::getVersion(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isPermissionedProverEnabled(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::lagOverEscapeHatchThreshold(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::newFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::permissionedProver(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::proxiableUUID(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setFinalizedState(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setHotShotDownSince(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setHotShotUp(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setPermissionedProver(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::setStateHistory(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::setstateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryCommitments(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryFirstIndex(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::stateHistoryRetentionPeriod(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::upgradeToAndCall(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -9576,56 +9576,56 @@ pub mod LightClientMock { match self { Self::AddressEmptyCode(_) => { ::SELECTOR - } + }, Self::ERC1967InvalidImplementation(_) => { ::SELECTOR - } + }, Self::ERC1967NonPayable(_) => { ::SELECTOR - } + }, Self::FailedInnerCall(_) => { ::SELECTOR - } + }, Self::InsufficientSnapshotHistory(_) => { ::SELECTOR - } + }, Self::InvalidAddress(_) => ::SELECTOR, Self::InvalidArgs(_) => ::SELECTOR, Self::InvalidHotShotBlockForCommitmentCheck(_) => { ::SELECTOR - } + }, Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::InvalidMaxStateHistory(_) => { ::SELECTOR - } + }, Self::InvalidProof(_) => ::SELECTOR, Self::NoChangeRequired(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OutdatedState(_) => ::SELECTOR, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::ProverNotPermissioned(_) => { ::SELECTOR - } + }, Self::UUPSUnauthorizedCallContext(_) => { ::SELECTOR - } + }, Self::UUPSUnsupportedProxiableUUID(_) => { ::SELECTOR - } + }, Self::WrongStakeTableUsed(_) => { ::SELECTOR - } + }, } } #[inline] @@ -10190,17 +10190,17 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::NewState) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) 
.map(Self::OwnershipTransferred) - } + }, Some( ::SIGNATURE_HASH, ) => ::decode_raw_log( @@ -10212,15 +10212,15 @@ pub mod LightClientMock { topics, data, validate, ) .map(Self::PermissionedProverRequired) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgrade) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log(topics, data, validate) .map(Self::Upgraded) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -10239,17 +10239,17 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::NewState(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), Self::Upgraded(inner) => alloy_sol_types::private::IntoLogData::to_log_data(inner), } @@ -10258,23 +10258,23 @@ pub mod LightClientMock { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::NewState(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverNotRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::PermissionedProverRequired(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::Upgrade(inner) => alloy_sol_types::private::IntoLogData::into_log_data(inner), Self::Upgraded(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/permissionedstaketable.rs b/contract-bindings-alloy/src/permissionedstaketable.rs index fd58dff88e..790313b4c5 100644 --- a/contract-bindings-alloy/src/permissionedstaketable.rs +++ b/contract-bindings-alloy/src/permissionedstaketable.rs @@ -167,7 +167,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -494,7 +494,7 @@ pub mod EdOnBN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1411,7 +1411,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1613,7 +1613,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1673,7 +1673,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1735,7 +1735,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1801,7 +1801,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ 
-1867,7 +1867,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1932,7 +1932,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2316,7 +2316,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2391,7 +2391,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2420,7 +2420,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2501,7 +2501,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2530,7 +2530,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2611,7 +2611,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2640,7 +2640,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2724,7 +2724,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2753,7 +2753,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2836,7 +2836,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2865,7 +2865,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2944,7 +2944,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2973,7 +2973,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3054,7 +3054,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3083,7 +3083,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3179,7 +3179,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3211,7 +3211,7 @@ pub mod PermissionedStakeTable { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3312,15 +3312,15 @@ pub mod PermissionedStakeTable { Self::initialize(_) => ::SELECTOR, Self::initializedAtBlock(_) => { ::SELECTOR - } + }, Self::isStaker(_) => ::SELECTOR, Self::owner(_) => ::SELECTOR, 
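// NOTE (comment-only): in these generated dispatchers each variant of the
// calls enum maps back to its 4-byte ABI selector -- the first four bytes
// of keccak256 of the Solidity signature, exposed by alloy_sol_types as the
// `SolCall::SELECTOR` associated constant (e.g. keccak256("owner()") begins
// 0x8d a5 cb 5b). The hunks here, as in the rest of the patch, only add
// trailing commas to the block-bodied arms.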
Self::renounceOwnership(_) => { ::SELECTOR - } + }, Self::transferOwnership(_) => { ::SELECTOR - } + }, Self::update(_) => ::SELECTOR, } } @@ -3446,28 +3446,28 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encoded_size(inner) - } + }, Self::initialize(inner) => { ::abi_encoded_size(inner) - } + }, Self::initializedAtBlock(inner) => { ::abi_encoded_size(inner) - } + }, Self::isStaker(inner) => { ::abi_encoded_size(inner) - } + }, Self::owner(inner) => { ::abi_encoded_size(inner) - } + }, Self::renounceOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::transferOwnership(inner) => { ::abi_encoded_size(inner) - } + }, Self::update(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3475,28 +3475,28 @@ pub mod PermissionedStakeTable { match self { Self::_hashBlsKey(inner) => { <_hashBlsKeyCall as alloy_sol_types::SolCall>::abi_encode_raw(inner, out) - } + }, Self::initialize(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::initializedAtBlock(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::isStaker(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::owner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::renounceOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::transferOwnership(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::update(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3536,19 +3536,19 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(_) => { ::SELECTOR - } + }, Self::NotInitializing(_) => { ::SELECTOR - } + }, Self::OwnableInvalidOwner(_) => { ::SELECTOR - } + }, Self::OwnableUnauthorizedAccount(_) => { ::SELECTOR - } + }, Self::StakerAlreadyExists(_) => { ::SELECTOR - } + }, Self::StakerNotFound(_) => ::SELECTOR, } } @@ -3658,24 +3658,24 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encoded_size(inner) - } + }, Self::NotInitializing(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encoded_size(inner) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encoded_size( inner, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encoded_size(inner) - } + }, Self::StakerNotFound(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3683,24 +3683,24 @@ pub mod PermissionedStakeTable { match self { Self::InvalidInitialization(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::NotInitializing(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableInvalidOwner(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::OwnableUnauthorizedAccount(inner) => { ::abi_encode_raw( inner, out, ) - } + }, Self::StakerAlreadyExists(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::StakerNotFound(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3751,19 +3751,19 @@ pub mod PermissionedStakeTable { topics, data, validate, ) .map(Self::Initialized) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::OwnershipTransferred) - } + }, Some(::SIGNATURE_HASH) => { ::decode_raw_log( topics, data, validate, ) .map(Self::StakersUpdated) - } + }, _ => alloy_sol_types::private::Err(alloy_sol_types::Error::InvalidLog { name: ::NAME, log: alloy_sol_types::private::Box::new( @@ -3782,26 +3782,26 @@ pub mod PermissionedStakeTable { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, 
Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::to_log_data(inner) - } + }, } } fn into_log_data(self) -> alloy_sol_types::private::LogData { match self { Self::Initialized(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::OwnershipTransferred(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, Self::StakersUpdated(inner) => { alloy_sol_types::private::IntoLogData::into_log_data(inner) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier.rs b/contract-bindings-alloy/src/plonkverifier.rs index 363d08a96a..b5a7c3c46b 100644 --- a/contract-bindings-alloy/src/plonkverifier.rs +++ b/contract-bindings-alloy/src/plonkverifier.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -659,7 +659,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1203,7 +1203,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2607,7 +2607,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2667,7 +2667,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2727,7 +2727,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2809,7 +2809,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2842,7 +2842,7 @@ pub mod PlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2974,7 +2974,7 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -2982,7 +2982,7 @@ pub mod PlonkVerifier { match self { Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3016,10 +3016,10 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(_) => { ::SELECTOR - } + }, Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, Self::WrongPlonkVK(_) => ::SELECTOR, } } @@ -3091,13 +3091,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encoded_size(inner) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3105,13 +3105,13 @@ pub mod PlonkVerifier { match self { Self::InvalidPlonkArgs(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::WrongPlonkVK(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-alloy/src/plonkverifier2.rs b/contract-bindings-alloy/src/plonkverifier2.rs index 12cc4014ac..1961b83c7c 100644 --- a/contract-bindings-alloy/src/plonkverifier2.rs +++ b/contract-bindings-alloy/src/plonkverifier2.rs @@ -280,7 +280,7 @@ pub mod BN254 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) 
=> {} + >(_) => {}, } } #[automatically_derived] @@ -659,7 +659,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -1203,7 +1203,7 @@ pub mod IPlonkVerifier { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2623,7 +2623,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2690,7 +2690,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2719,7 +2719,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2800,7 +2800,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2829,7 +2829,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2925,7 +2925,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -2958,7 +2958,7 @@ pub mod PlonkVerifier2 { match _t { alloy_sol_types::private::AssertTypeEq::< ::RustType, - >(_) => {} + >(_) => {}, } } #[automatically_derived] @@ -3118,13 +3118,13 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::R_MOD(inner) => { ::abi_encoded_size(inner) - } + }, Self::verify(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3132,13 +3132,13 @@ pub mod PlonkVerifier2 { match self { Self::P_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::R_MOD(inner) => { ::abi_encode_raw(inner, out) - } + }, Self::verify(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } @@ -3166,7 +3166,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(_) => { ::SELECTOR - } + }, } } #[inline] @@ -3211,7 +3211,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encoded_size(inner) - } + }, } } #[inline] @@ -3219,7 +3219,7 @@ pub mod PlonkVerifier2 { match self { Self::UnsupportedDegree(inner) => { ::abi_encode_raw(inner, out) - } + }, } } } diff --git a/contract-bindings-ethers/src/erc1967_proxy.rs b/contract-bindings-ethers/src/erc1967_proxy.rs index cff5203cba..453bf08c61 100644 --- a/contract-bindings-ethers/src/erc1967_proxy.rs +++ b/contract-bindings-ethers/src/erc1967_proxy.rs @@ -320,7 +320,7 @@ pub mod erc1967_proxy { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), @@ -333,21 +333,21 @@ pub mod erc1967_proxy { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -358,7 +358,7 @@ pub mod erc1967_proxy { 
Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), diff --git a/contract-bindings-ethers/src/fee_contract.rs b/contract-bindings-ethers/src/fee_contract.rs index 06cb0aa83d..38f00a01f0 100644 --- a/contract-bindings-ethers/src/fee_contract.rs +++ b/contract-bindings-ethers/src/fee_contract.rs @@ -1071,32 +1071,32 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FunctionDoesNotExist(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidUserAddress(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NoFunctionCalled(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1107,70 +1107,70 @@ pub mod fee_contract { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ if selector == ::selector( ) => { true - } + }, _ => false, } } @@ -1183,7 +1183,7 @@ pub mod fee_contract { Self::DepositTooSmall(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::FunctionDoesNotExist(element) => ::core::fmt::Display::fmt(element, f), @@ -1196,7 +1196,7 @@ pub mod fee_contract { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } } @@ -1754,7 +1754,7 @@ 
pub mod fee_contract { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::Balances(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Deposit(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/light_client.rs b/contract-bindings-ethers/src/light_client.rs index 32daa6fc12..09aa7f02bd 100644 --- a/contract-bindings-ethers/src/light_client.rs +++ b/contract-bindings-ethers/src/light_client.rs @@ -1726,45 +1726,45 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client { 
Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2176,10 +2176,10 @@ pub mod light_client { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2777,54 +2777,54 @@ pub mod light_client { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2837,7 +2837,7 @@ pub mod light_client { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2855,7 +2855,7 @@ pub 
mod light_client { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_arbitrum.rs b/contract-bindings-ethers/src/light_client_arbitrum.rs index df441f0a6d..13fa3c65d5 100644 --- a/contract-bindings-ethers/src/light_client_arbitrum.rs +++ b/contract-bindings-ethers/src/light_client_arbitrum.rs @@ -1726,45 +1726,45 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -1859,7 +1859,7 @@ pub mod light_client_arbitrum { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -1867,7 +1867,7 @@ pub mod light_client_arbitrum { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => 
::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -1880,7 +1880,7 @@ pub mod light_client_arbitrum { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2178,10 +2178,10 @@ pub mod light_client_arbitrum { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -2779,54 +2779,54 @@ pub mod light_client_arbitrum { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -2839,7 +2839,7 @@ pub mod light_client_arbitrum { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => 
::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -2857,7 +2857,7 @@ pub mod light_client_arbitrum { Self::SetPermissionedProver(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/light_client_mock.rs b/contract-bindings-ethers/src/light_client_mock.rs index 951f036d38..3132c0d7e7 100644 --- a/contract-bindings-ethers/src/light_client_mock.rs +++ b/contract-bindings-ethers/src/light_client_mock.rs @@ -1867,45 +1867,45 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::ERC1967InvalidImplementation(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ERC1967NonPayable(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::FailedInnerCall(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InsufficientSnapshotHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidAddress(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidArgs(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InvalidHotShotBlockForCommitmentCheck(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidMaxStateHistory(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::InvalidProof(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NoChangeRequired(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OutdatedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProverNotPermissioned(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnauthorizedCallContext(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::UUPSUnsupportedProxiableUUID(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::WrongStakeTableUsed(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } } @@ -2000,7 +2000,7 @@ pub mod light_client_mock { Self::AddressEmptyCode(element) => ::core::fmt::Display::fmt(element, f), Self::ERC1967InvalidImplementation(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::ERC1967NonPayable(element) => ::core::fmt::Display::fmt(element, f), Self::FailedInnerCall(element) => ::core::fmt::Display::fmt(element, f), Self::InsufficientSnapshotHistory(element) => ::core::fmt::Display::fmt(element, f), @@ -2008,7 +2008,7 @@ pub mod light_client_mock { Self::InvalidArgs(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidHotShotBlockForCommitmentCheck(element) => 
{ ::core::fmt::Display::fmt(element, f) - } + }, Self::InvalidInitialization(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidMaxStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::InvalidProof(element) => ::core::fmt::Display::fmt(element, f), @@ -2021,7 +2021,7 @@ pub mod light_client_mock { Self::UUPSUnauthorizedCallContext(element) => ::core::fmt::Display::fmt(element, f), Self::UUPSUnsupportedProxiableUUID(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::WrongStakeTableUsed(element) => ::core::fmt::Display::fmt(element, f), Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), } @@ -2319,10 +2319,10 @@ pub mod light_client_mock { Self::OwnershipTransferredFilter(element) => ::core::fmt::Display::fmt(element, f), Self::PermissionedProverNotRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::PermissionedProverRequiredFilter(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::UpgradeFilter(element) => ::core::fmt::Display::fmt(element, f), Self::UpgradedFilter(element) => ::core::fmt::Display::fmt(element, f), } @@ -3015,60 +3015,60 @@ pub mod light_client_mock { match self { Self::UpgradeInterfaceVersion(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::CurrentBlockNumber(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::DisablePermissionedProverMode(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::FinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GenesisStakeTableState(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GenesisState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::GetHotShotCommitment(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetStateHistoryCount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::GetVersion(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::IsPermissionedProverEnabled(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::LagOverEscapeHatchThreshold(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NewFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::PermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::ProxiableUUID(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetFinalizedState(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetHotShotDownSince(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetHotShotUp(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetPermissionedProver(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::SetStateHistory(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::SetstateHistoryRetentionPeriod(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryCommitments(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryFirstIndex(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StateHistoryRetentionPeriod(element) => { 
::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::TransferOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::UpgradeToAndCall(element) => ::ethers::core::abi::AbiEncode::encode(element), } @@ -3081,7 +3081,7 @@ pub mod light_client_mock { Self::CurrentBlockNumber(element) => ::core::fmt::Display::fmt(element, f), Self::DisablePermissionedProverMode(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::FinalizedState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisStakeTableState(element) => ::core::fmt::Display::fmt(element, f), Self::GenesisState(element) => ::core::fmt::Display::fmt(element, f), @@ -3103,7 +3103,7 @@ pub mod light_client_mock { Self::SetStateHistory(element) => ::core::fmt::Display::fmt(element, f), Self::SetstateHistoryRetentionPeriod(element) => { ::core::fmt::Display::fmt(element, f) - } + }, Self::StateHistoryCommitments(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryFirstIndex(element) => ::core::fmt::Display::fmt(element, f), Self::StateHistoryRetentionPeriod(element) => ::core::fmt::Display::fmt(element, f), diff --git a/contract-bindings-ethers/src/permissioned_stake_table.rs b/contract-bindings-ethers/src/permissioned_stake_table.rs index dfabb2739a..aa4e50abbb 100644 --- a/contract-bindings-ethers/src/permissioned_stake_table.rs +++ b/contract-bindings-ethers/src/permissioned_stake_table.rs @@ -763,17 +763,17 @@ pub mod permissioned_stake_table { match self { Self::InvalidInitialization(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::NotInitializing(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::OwnableInvalidOwner(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::OwnableUnauthorizedAccount(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerAlreadyExists(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::StakerNotFound(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RevertString(s) => ::ethers::core::abi::AbiEncode::encode(s), } @@ -787,28 +787,28 @@ pub mod permissioned_stake_table { == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ => false, } } @@ -1178,7 +1178,7 @@ pub mod permissioned_stake_table { Self::Initialize(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::InitializedAtBlock(element) => { ::ethers::core::abi::AbiEncode::encode(element) - } + }, Self::IsStaker(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::Owner(element) => ::ethers::core::abi::AbiEncode::encode(element), Self::RenounceOwnership(element) => ::ethers::core::abi::AbiEncode::encode(element), diff --git a/contract-bindings-ethers/src/plonk_verifier.rs b/contract-bindings-ethers/src/plonk_verifier.rs index 9f5ae4cd72..48b9775bd4 100644 --- a/contract-bindings-ethers/src/plonk_verifier.rs +++ b/contract-bindings-ethers/src/plonk_verifier.rs @@ -442,12 +442,12 @@ pub mod plonk_verifier { [0x08, 0xc3, 0x79, 0xa0] => true, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => { true - } + }, _ if selector == ::selector() => true, _ => false, } diff --git a/contracts/rust/diff-test/src/main.rs b/contracts/rust/diff-test/src/main.rs index a07382ff11..dc396fcc4f 
100644 --- a/contracts/rust/diff-test/src/main.rs +++ b/contracts/rust/diff-test/src/main.rs @@ -102,7 +102,7 @@ fn main() { field_to_u256(domain.group_gen), ); println!("{}", res.encode_hex()); - } + }, Action::EvalDomainElements => { if cli.args.len() != 2 { panic!("Should provide arg1=logSize, arg2=length"); @@ -117,7 +117,7 @@ fn main() { .map(field_to_u256) .collect::>(); println!("{}", res.encode_hex()); - } + }, Action::EvalDataGen => { if cli.args.len() != 3 { panic!("Should provide arg1=logSize, arg2=zeta, arg3=publicInput"); @@ -138,7 +138,7 @@ fn main() { field_to_u256(pi_eval), ); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendMsg => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=message"); @@ -153,7 +153,7 @@ fn main() { >::append_message(&mut t, &[], &msg).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendField => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=fieldElement"); @@ -165,7 +165,7 @@ fn main() { t.append_field_elem::(&[], &field).unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendGroup => { if cli.args.len() != 2 { panic!("Should provide arg1=transcript, arg2=groupElement"); @@ -179,7 +179,7 @@ fn main() { .unwrap(); let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptGetChal => { if cli.args.len() != 1 { panic!("Should provide arg1=transcript"); @@ -193,7 +193,7 @@ fn main() { let updated_t: ParsedTranscript = t.into(); let res = (updated_t, field_to_u256(chal)); println!("{}", res.encode_hex()); - } + }, Action::TranscriptAppendVkAndPi => { if cli.args.len() != 3 { panic!("Should provide arg1=transcript, arg2=verifyingKey, arg3=publicInput"); @@ -210,7 +210,7 @@ fn main() { let res: ParsedTranscript = t.into(); println!("{}", (res,).encode_hex()); - } + }, Action::TranscriptAppendProofEvals => { if cli.args.len() != 1 { panic!("Should provide arg1=transcript"); @@ -232,7 +232,7 @@ fn main() { let t_updated: ParsedTranscript = t.into(); let res = (t_updated, proof_parsed); println!("{}", res.encode_hex()); - } + }, Action::PlonkConstants => { let coset_k = coset_k(); let open_key = open_key(); @@ -250,7 +250,7 @@ fn main() { field_to_u256::(open_key.beta_h.y().unwrap().c0), ); println!("{}", res.encode_hex()); - } + }, Action::PlonkComputeChal => { if cli.args.len() != 4 { panic!("Should provide arg1=verifyingKey, arg2=publicInput, arg3=proof, arg4=extraTranscriptInitMsg"); @@ -275,7 +275,7 @@ fn main() { .unwrap() .into(); println!("{}", (chal,).encode_hex()); - } + }, Action::PlonkVerify => { let (proof, vk, public_input, _, _): ( Proof, @@ -304,7 +304,7 @@ fn main() { let res = (vk_parsed, pi_parsed, proof_parsed); println!("{}", res.encode_hex()); - } + }, Action::DummyProof => { let mut rng = jf_utils::test_rng(); if !cli.args.is_empty() { @@ -313,10 +313,10 @@ fn main() { } let proof = ParsedPlonkProof::dummy(&mut rng); println!("{}", (proof,).encode_hex()); - } + }, Action::TestOnly => { println!("args: {:?}", cli.args); - } + }, Action::GenClientWallet => { if cli.args.len() != 2 { panic!("Should provide arg1=senderAddress arg2=seed"); @@ -358,7 +358,7 @@ fn main() { sender_address, ); println!("{}", res.encode_hex()); - } + }, Action::GenRandomG2Point => { if cli.args.len() != 1 { panic!("Should provide arg1=exponent"); @@ -370,7 +370,7 @@ fn main() { let point_parsed: ParsedG2Point = point.into(); let 
res = point_parsed; println!("{}", (res.encode_hex())); - } + }, Action::MockGenesis => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -382,7 +382,7 @@ fn main() { let res = (ledger.get_state(), ledger.get_stake_table_state()); println!("{}", res.encode_hex()); - } + }, Action::MockConsecutiveFinalizedStates => { if cli.args.len() != 1 { panic!("Should provide arg1=numInitValidators"); @@ -413,7 +413,7 @@ fn main() { let res = (new_states, proofs); println!("{}", res.encode_hex()); - } + }, Action::MockSkipBlocks => { if cli.args.is_empty() || cli.args.len() > 2 { panic!("Should provide arg1=numBlockSkipped,arg2(opt)=requireValidProof"); @@ -444,7 +444,7 @@ fn main() { (state_parsed, proof_parsed) }; println!("{}", res.encode_hex()); - } + }, Action::GenBLSHashes => { if cli.args.len() != 1 { panic!("Should provide arg1=message"); @@ -464,7 +464,7 @@ fn main() { let res = (fq_u256, hash_to_curve_elem_parsed); println!("{}", res.encode_hex()); - } + }, Action::GenBLSSig => { let mut rng = jf_utils::test_rng(); @@ -486,6 +486,6 @@ fn main() { let res = (vk_parsed, sig_parsed); println!("{}", res.encode_hex()); - } + }, }; } diff --git a/hotshot-builder-api/src/api.rs b/hotshot-builder-api/src/api.rs index 04042630c1..5250d1320a 100644 --- a/hotshot-builder-api/src/api.rs +++ b/hotshot-builder-api/src/api.rs @@ -34,7 +34,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-builder-core-refactored/src/service.rs b/hotshot-builder-core-refactored/src/service.rs index d9831565d7..46e1447846 100644 --- a/hotshot-builder-core-refactored/src/service.rs +++ b/hotshot-builder-core-refactored/src/service.rs @@ -201,7 +201,7 @@ where match event.event { EventType::Error { error } => { error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let this = Arc::clone(&self); spawn(async move { @@ -217,7 +217,7 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let prune_cutoff = leaf_chain[0].leaf.view_number(); @@ -226,16 +226,16 @@ where let this = Arc::clone(&self); spawn(async move { this.block_store.write().await.prune(prune_cutoff) }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. 
} => { let coordinator = Arc::clone(&self.coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -287,10 +287,10 @@ where BuilderStateLookup::Found(builder) => break Ok(builder), BuilderStateLookup::Decided => { return Err(Error::AlreadyDecided); - } + }, BuilderStateLookup::NotFound => { sleep(check_period).await; - } + }, }; } } @@ -374,7 +374,7 @@ where Err(error) => { warn!(?error, "Failed to build block payload"); return Err(Error::BuildBlock(error)); - } + }, }; // count the number of txns @@ -442,7 +442,7 @@ where // Timeout waiting for ideal state, get the highest view builder instead warn!("Couldn't find the ideal builder state"); self.coordinator.highest_view_builder().await - } + }, Ok(Err(e)) => { // State already decided let lowest_view = self.coordinator.lowest_view().await; @@ -451,7 +451,7 @@ where "get_available_blocks request for decided view" ); return Err(e); - } + }, }; let Some(builder) = builder else { @@ -485,7 +485,7 @@ where } Ok(vec![response]) - } + }, // Success, but no block: we don't have transactions and aren't prioritizing finalization Ok(Ok(None)) => Ok(vec![]), // Error building block, try to respond with a cached one as last-ditch attempt @@ -495,7 +495,7 @@ where } else { Err(e) } - } + }, } } diff --git a/hotshot-builder-core/src/builder_state.rs b/hotshot-builder-core/src/builder_state.rs index 558d0d8767..7d9bf426a4 100644 --- a/hotshot-builder-core/src/builder_state.rs +++ b/hotshot-builder-core/src/builder_state.rs @@ -295,7 +295,7 @@ async fn best_builder_states_to_extend( Some(parent_block_references) => { parent_block_references.leaf_commit == justify_qc.data.leaf_commit && parent_block_references.view_number == justify_qc.view_number - } + }, }, ) .map(|(builder_state_id, _)| builder_state_id.clone()) @@ -1102,15 +1102,15 @@ impl BuilderState { } self.txns_in_queue.insert(tx.commit); self.tx_queue.push_back(tx); - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } } diff --git a/hotshot-builder-core/src/service.rs b/hotshot-builder-core/src/service.rs index a45c4039a0..abb68c4486 100644 --- a/hotshot-builder-core/src/service.rs +++ b/hotshot-builder-core/src/service.rs @@ -409,19 +409,19 @@ impl GlobalState { match old_status { Some(TransactionStatus::Rejected { reason }) => { tracing::debug!("Changing the status of a rejected transaction to status {:?}! The reason it is previously rejected is {:?}", txn_status, reason); - } + }, Some(TransactionStatus::Sequenced { leaf }) => { let e = format!("Changing the status of a sequenced transaction to status {:?} is not allowed! The transaction is sequenced in leaf {:?}", txn_status, leaf); tracing::error!(e); return Err(BuildError::Error(e)); - } + }, _ => { tracing::debug!( "change status of transaction {txn_hash} from {:?} to {:?}", old_status, txn_status ); - } + }, } } else { tracing::debug!( @@ -540,23 +540,23 @@ impl From> for BuildError { match error { AvailableBlocksError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in get_available_blocks".to_string()) - } + }, AvailableBlocksError::RequestForAvailableViewThatHasAlreadyBeenDecided => { BuildError::Error( "Request for available blocks for a view that has already been decided." 
.to_string(), ) - } + }, AvailableBlocksError::SigningBlockFailed(e) => { BuildError::Error(format!("Signing over block info failed: {:?}", e)) - } + }, AvailableBlocksError::GetChannelForMatchingBuilderError(e) => e.into(), AvailableBlocksError::NoBlocksAvailable => { BuildError::Error("No blocks available".to_string()) - } + }, AvailableBlocksError::ChannelUnexpectedlyClosed => { BuildError::Error("Channel unexpectedly closed".to_string()) - } + }, } } } @@ -580,13 +580,13 @@ impl From> for BuildError { match error { ClaimBlockError::SignatureValidationFailed => { BuildError::Error("Signature validation failed in claim block".to_string()) - } + }, ClaimBlockError::SigningCommitmentFailed(e) => { BuildError::Error(format!("Signing over builder commitment failed: {:?}", e)) - } + }, ClaimBlockError::BlockDataNotFound => { BuildError::Error("Block data not found".to_string()) - } + }, } } } @@ -608,10 +608,10 @@ impl From> for BuildError { ), ClaimBlockHeaderInputError::BlockHeaderNotFound => { BuildError::Error("Block header not found".to_string()) - } + }, ClaimBlockHeaderInputError::FailedToSignFeeInfo(e) => { BuildError::Error(format!("Failed to sign fee info: {:?}", e)) - } + }, } } } @@ -743,7 +743,7 @@ impl ProxyGlobalState { break Err(AvailableBlocksError::NoBlocksAvailable); } continue; - } + }, Ok(recv_attempt) => { if recv_attempt.is_none() { tracing::error!( @@ -752,7 +752,7 @@ impl ProxyGlobalState { } break recv_attempt .ok_or_else(|| AvailableBlocksError::ChannelUnexpectedlyClosed); - } + }, } }; @@ -783,13 +783,13 @@ impl ProxyGlobalState { response.builder_hash ); Ok(vec![initial_block_info]) - } + }, // We failed to get available blocks Err(e) => { tracing::debug!("Failed to get available blocks for parent {state_id}",); Err(e) - } + }, } } @@ -1111,7 +1111,7 @@ pub async fn run_non_permissioned_standalone_builder_service< match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, // tx event EventType::Transactions { transactions } => { let max_block_size = { @@ -1151,7 +1151,7 @@ pub async fn run_non_permissioned_standalone_builder_service< .await?; } } - } + }, // decide event EventType::Decide { block_size: _, @@ -1160,19 +1160,19 @@ pub async fn run_non_permissioned_standalone_builder_service< } => { let latest_decide_view_num = leaf_chain[0].leaf.view_number(); handle_decide_event(&decide_sender, latest_decide_view_num).await; - } + }, // DA proposal event EventType::DaProposal { proposal, sender } => { handle_da_event(&da_sender, Arc::new(proposal), sender).await; - } + }, // QC proposal event EventType::QuorumProposal { proposal, sender } => { // get the leader for current view handle_quorum_event(&quorum_sender, Arc::new(proposal), sender).await; - } + }, _ => { tracing::debug!("Unhandled event from Builder"); - } + }, } } } @@ -2141,10 +2141,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, } } @@ -2366,10 +2366,10 @@ mod test { match vid_trigger_receiver_2.await { Ok(TriggerStatus::Start) => { // This is expected - } + }, _ => { panic!("did not receive TriggerStatus::Start from vid_trigger_receiver as expected"); - } + }, } assert!( @@ -2960,13 +2960,13 @@ mod test { Err(AvailableBlocksError::NoBlocksAvailable) => { // This is what we expect. // This message *should* indicate that no blocks were available. 
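Every hunk in this patch applies one mechanical change: block-bodied match arms that previously ended with a bare `}` now end with `},`. That is the effect of rustfmt's `match_block_trailing_comma = true` option, which this series appears to enable (the rustfmt.toml change itself is not shown in this excerpt). A minimal sketch of the convention, using hypothetical `Status` and `describe` names:

    // Hypothetical example of the formatting convention applied throughout
    // this patch: with `match_block_trailing_comma = true`, rustfmt emits a
    // trailing comma after block-bodied match arms (the `- }` / `+ },` lines).
    enum Status {
        Ready(u32),
        Pending,
    }

    fn describe(status: Status) -> u32 {
        match status {
            Status::Ready(value) => {
                // Block-bodied arm: now formatted as `},` instead of `}`.
                value + 1
            },
            // Expression-bodied arms already ended in a comma, so they are
            // untouched by this patch.
            Status::Pending => 0,
        }
    }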
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3032,13 +3032,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3103,13 +3103,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3172,13 +3172,13 @@ mod test { Err(AvailableBlocksError::GetChannelForMatchingBuilderError(_)) => { // This is what we expect. // This message *should* indicate that the response channel was closed. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3285,17 +3285,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3316,7 +3316,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3336,7 +3336,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3431,17 +3431,17 @@ mod test { let response_channel = match response_receiver.next().await { None => { panic!("Expected a request for available blocks, but didn't get one"); - } + }, Some(MessageType::RequestMessage(req_msg)) => { assert_eq!(req_msg.state_id, expected_builder_state_id); req_msg.response_channel - } + }, Some(message) => { panic!( "Expected a request for available blocks, but got a different message: {:?}", message ); - } + }, }; // We want to send a ResponseMessage to the channel @@ -3462,7 +3462,7 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(result) => { assert_eq!( result, @@ -3482,7 +3482,7 @@ mod test { }], "get_available_blocks response matches expectation" ); - } + }, } } @@ -3539,13 +3539,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3600,13 +3600,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3687,10 +3687,10 @@ mod test { match vid_trigger_receiver.await { Ok(TriggerStatus::Start) => { // This is what we expect. 
- } + }, _ => { panic!("Expected a TriggerStatus::Start event"); - } + }, } let result = claim_block_join_handle.await; @@ -3698,10 +3698,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected - } + }, } } @@ -3759,13 +3759,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3820,13 +3820,13 @@ mod test { // This is what we expect. // This message *should* indicate that the signature passed // did not match the given public key. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, } } @@ -3887,10 +3887,10 @@ mod test { match result { Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, Ok(_) => { // This is expected. - } + }, } } @@ -3943,13 +3943,13 @@ mod test { match result { Err(HandleDaEventError::SignatureValidationFailed) => { // This is expected. - } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead") - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4001,13 +4001,13 @@ mod test { match result { Err(HandleDaEventError::BroadcastFailed(_)) => { // This error is expected - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4050,20 +4050,20 @@ mod test { match result { Ok(_) => { // This is expected. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut da_channel_receiver = da_channel_receiver; match da_channel_receiver.next().await { Some(MessageType::DaProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_da_proposal); - } + }, _ => { panic!("Expected a DaProposalMessage, but got something else"); - } + }, } } @@ -4134,13 +4134,13 @@ mod test { match result { Err(HandleQuorumEventError::SignatureValidationFailed) => { // This is expected. - } + }, Ok(_) => { panic!("expected an error, but received a successful attempt instead"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4209,13 +4209,13 @@ mod test { match result { Err(HandleQuorumEventError::BroadcastFailed(_)) => { // This is expected. - } + }, Ok(_) => { panic!("Expected an error, but got a result"); - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4275,20 +4275,20 @@ mod test { match result { Ok(_) => { // This is expected. 
- } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } let mut quorum_channel_receiver = quorum_channel_receiver; match quorum_channel_receiver.next().await { Some(MessageType::QuorumProposalMessage(da_proposal_message)) => { assert_eq!(da_proposal_message.proposal, signed_quorum_proposal); - } + }, _ => { panic!("Expected a QuorumProposalMessage, but got something else"); - } + }, } } @@ -4323,16 +4323,16 @@ mod test { match handle_received_txns_iter.next() { Some(Err(HandleReceivedTxnsError::TooManyTransactions)) => { // This is expected, - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4376,16 +4376,16 @@ mod test { // This is expected, assert!(estimated_length >= 256); assert_eq!(max_txn_len, TEST_MAX_TX_LEN); - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4431,21 +4431,21 @@ mod test { match err { async_broadcast::TrySendError::Closed(_) => { // This is expected. - } + }, _ => { panic!("Unexpected error: {:?}", err); - } + }, } - } + }, Some(Err(err)) => { panic!("Unexpected error: {:?}", err); - } + }, Some(Ok(_)) => { panic!("Expected an error, but got a result"); - } + }, None => { panic!("Expected an error, but got a result"); - } + }, } } } @@ -4473,10 +4473,10 @@ mod test { match iteration { Ok(_) => { // This is expected. - } + }, Err(err) => { panic!("Unexpected error: {:?}", err); - } + }, } } @@ -4485,10 +4485,10 @@ mod test { match tx_receiver.next().await { Some(received_txn) => { assert_eq!(received_txn.tx, tx); - } + }, _ => { panic!("Expected a TransactionMessage, but got something else"); - } + }, } } } @@ -4553,10 +4553,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4594,10 +4594,10 @@ mod test { match proxy_global_state.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!("transaction status should be Pending instead of {:?}", e); - } + }, } } @@ -4624,13 +4624,13 @@ mod test { } else { assert_eq!(txn_status, TransactionStatus::Pending); } - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } @@ -4644,22 +4644,22 @@ mod test { { Err(err) => { panic!("Expected a result, but got a error {:?}", err); - } + }, _ => { // This is expected - } + }, } match write_guard.txn_status(tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Pending); - } + }, e => { panic!( "transaction status should be a valid status instead of {:?}", e ); - } + }, } } } @@ -4682,10 +4682,10 @@ mod test { { Err(_err) => { // This is expected - } + }, _ => { panic!("Expected an error, but got a result"); - } + }, } } @@ -4695,10 +4695,10 @@ mod test { match proxy_global_state.txn_status(unknown_tx.commit()).await { Ok(txn_status) => { assert_eq!(txn_status, TransactionStatus::Unknown); - } + }, e => { panic!("transaction status should be Unknown instead of {:?}", e); - } + }, } } } diff --git a/hotshot-builder-core/src/testing/basic_test.rs b/hotshot-builder-core/src/testing/basic_test.rs 
index 867706c213..b769ef7c61 100644 --- a/hotshot-builder-core/src/testing/basic_test.rs +++ b/hotshot-builder-core/src/testing/basic_test.rs @@ -461,7 +461,7 @@ mod tests { ) .unwrap(); current_leaf - } + }, }; DecideMessage:: { diff --git a/hotshot-builder-core/src/testing/mod.rs b/hotshot-builder-core/src/testing/mod.rs index 279e3ac84c..74f0661b88 100644 --- a/hotshot-builder-core/src/testing/mod.rs +++ b/hotshot-builder-core/src/testing/mod.rs @@ -192,7 +192,7 @@ pub async fn calc_proposal_msg( &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -208,7 +208,7 @@ pub async fn calc_proposal_msg( prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", round, justify_qc); diff --git a/hotshot-events-service/src/api.rs b/hotshot-events-service/src/api.rs index 215d1f28e4..dd4579cef8 100644 --- a/hotshot-events-service/src/api.rs +++ b/hotshot-events-service/src/api.rs @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-events-service/src/events_source.rs b/hotshot-events-service/src/events_source.rs index 4f905bca63..22e6793384 100644 --- a/hotshot-events-service/src/events_source.rs +++ b/hotshot-events-service/src/events_source.rs @@ -99,7 +99,7 @@ impl EventFilterSet { EventType::Decide { .. } => filter.contains(&EventFilter::Decide), EventType::ReplicaViewTimeout { .. } => { filter.contains(&EventFilter::ReplicaViewTimeout) - } + }, EventType::ViewFinished { .. } => filter.contains(&EventFilter::ViewFinished), EventType::ViewTimeout { .. } => filter.contains(&EventFilter::ViewTimeout), EventType::Transactions { .. } => filter.contains(&EventFilter::Transactions), diff --git a/hotshot-example-types/src/testable_delay.rs b/hotshot-example-types/src/testable_delay.rs index 07f460eaf3..ea16b4b3f3 100644 --- a/hotshot-example-types/src/testable_delay.rs +++ b/hotshot-example-types/src/testable_delay.rs @@ -85,16 +85,16 @@ pub trait TestableDelay { /// Add a delay from settings async fn handle_async_delay(settings: &DelaySettings) { match settings.delay_option { - DelayOptions::None => {} + DelayOptions::None => {}, DelayOptions::Fixed => { sleep(Duration::from_millis(settings.fixed_time_in_milliseconds)).await; - } + }, DelayOptions::Random => { let sleep_in_millis = rand::thread_rng().gen_range( settings.min_time_in_milliseconds..=settings.max_time_in_milliseconds, ); sleep(Duration::from_millis(sleep_in_millis)).await; - } + }, } } @@ -124,7 +124,7 @@ impl Iterator for SupportedTraitTypesForAsyncDelayIterator { _ => { assert_eq!(self.index, 3, "Need to ensure that newly added or removed `SupportedTraitTypesForAsyncDelay` enum is handled in iterator"); return None; - } + }, }; self.index += 1; supported_type diff --git a/hotshot-examples/infra/mod.rs b/hotshot-examples/infra/mod.rs index 875d849678..01e9d6223f 100755 --- a/hotshot-examples/infra/mod.rs +++ b/hotshot-examples/infra/mod.rs @@ -441,13 +441,13 @@ pub trait RunDa< match event_stream.next().await { None => { panic!("Error! Event stream completed before consensus ended."); - } + }, Some(Event { event, .. 
}) => { match event { EventType::Error { error } => { error!("Error in consensus: {:?}", error); // TODO what to do here - } + }, EventType::Decide { leaf_chain, qc: _, @@ -514,16 +514,16 @@ pub trait RunDa< warn!("Leaf chain is greater than 1 with len {}", leaf_chain.len()); } // when we make progress, submit new events - } + }, EventType::ReplicaViewTimeout { view_number } => { warn!("Timed out as a replicas in view {:?}", view_number); - } + }, EventType::ViewTimeout { view_number } => { warn!("Timed out in view {:?}", view_number); - } - _ => {} // mostly DA proposal + }, + _ => {}, // mostly DA proposal } - } + }, } } // Panic if we don't have the genesis epoch, there is no recovery from that @@ -1092,11 +1092,11 @@ where }) .collect(); bind_address = Url::parse(&format!("http://0.0.0.0:{port}")).unwrap(); - } + }, Some(ref addr) => { bind_address = Url::parse(&format!("http://{addr}")).expect("Valid URL"); advertise_urls = vec![bind_address.clone()]; - } + }, } match run_config.builder { @@ -1116,7 +1116,7 @@ where .await; Some(builder_task) - } + }, BuilderType::Simple => { let builder_task = >::start( @@ -1132,7 +1132,7 @@ where .await; Some(builder_task) - } + }, } } diff --git a/hotshot-fakeapi/src/fake_solver.rs b/hotshot-fakeapi/src/fake_solver.rs index b52418cc9b..f2b81175b5 100644 --- a/hotshot-fakeapi/src/fake_solver.rs +++ b/hotshot-fakeapi/src/fake_solver.rs @@ -91,11 +91,11 @@ impl FakeSolverState { status: tide_disco::StatusCode::INTERNAL_SERVER_ERROR, message: "Internal Server Error".to_string(), }); - } + }, FakeSolverFaultType::TimeoutFault => { // Sleep for the preconfigured 1 second timeout interval tokio::time::sleep(SOLVER_MAX_TIMEOUT_S).await; - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs index 566db12d4c..cf07181249 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/bootstrap.rs @@ -50,19 +50,19 @@ impl DHTBootstrapTask { Some(InputEvent::BootstrapFinished) => { tracing::debug!("Bootstrap finished"); self.in_progress = false; - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::info!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::StartBootstrap) => { tracing::warn!("Trying to start bootstrap that's already in progress"); continue; - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; - } + }, } } else if let Ok(maybe_event) = timeout(Duration::from_secs(120), self.rx.next()).await { @@ -70,18 +70,18 @@ impl DHTBootstrapTask { Some(InputEvent::StartBootstrap) => { tracing::debug!("Start bootstrap in bootstrap task"); self.bootstrap(); - } + }, Some(InputEvent::ShutdownBootstrap) => { tracing::debug!("ShutdownBootstrap received, shutting down"); break; - } + }, Some(InputEvent::BootstrapFinished) => { tracing::debug!("not in progress got bootstrap finished"); - } + }, None => { tracing::debug!("Bootstrap channel closed, exiting loop"); break; - } + }, } } else { tracing::debug!("Start bootstrap in bootstrap task after timeout"); diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs index 7ef41e1192..950450aecc 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/mod.rs @@ -274,31 +274,31 @@ impl DHTBehaviour { let num_entries = o.get_mut(); 
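// Note: `records` tallies, via the HashMap entry API, how many peers have
// returned this exact record value; an occupied entry is incremented in
// place and the new count handed back, so the caller below can judge
// whether enough matching responses have arrived to trust the value.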
*num_entries += 1; *num_entries - } + }, std::collections::hash_map::Entry::Vacant(v) => { v.insert(1); 1 - } + }, } - } + }, GetRecordOk::FinishedWithNoAdditionalRecord { cache_candidates: _, } => { tracing::debug!("GetRecord Finished with No Additional Record"); last = true; 0 - } + }, }, Err(err) => { warn!("Error in Kademlia query: {:?}", err); 0 - } + }, }, None => { // We already finished the query (or it's been cancelled). Do nothing and exit the // function. return; - } + }, }; // if the query has completed and we need to retry @@ -398,7 +398,7 @@ impl DHTBehaviour { if query.notify.send(()).is_err() { warn!("Put DHT: client channel closed before put record request could be sent"); } - } + }, Err(e) => { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); @@ -409,7 +409,7 @@ impl DHTBehaviour { ); // push back onto the queue self.retry_put(query); - } + }, } } else { warn!("Put DHT: completed DHT query that is no longer tracked."); @@ -439,7 +439,7 @@ impl DHTBehaviour { if last { self.handle_put_query(record_results, id); } - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetClosestPeers(r), id: query_id, @@ -454,13 +454,13 @@ impl DHTBehaviour { }; }; debug!("Successfully got closest peers for key {:?}", key); - } + }, Err(e) => { if let Some(chan) = self.in_progress_get_closest_peers.remove(&query_id) { let _: Result<_, _> = chan.send(()); }; warn!("Failed to get closest peers: {:?}", e); - } + }, }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::GetRecord(record_results), @@ -469,7 +469,7 @@ impl DHTBehaviour { .. } => { self.handle_get_query(store, record_results, id, last); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Ok(BootstrapOk { @@ -485,7 +485,7 @@ impl DHTBehaviour { debug!("Bootstrap in progress, {} nodes remaining", num_remaining); } return Some(NetworkEvent::IsBootstrapped); - } + }, KademliaEvent::OutboundQueryProgressed { result: QueryResult::Bootstrap(Err(e)), .. @@ -495,16 +495,16 @@ impl DHTBehaviour { error!("Failed to bootstrap: {:?}", e); } self.finish_bootstrap(); - } + }, KademliaEvent::RoutablePeer { peer, address: _ } => { debug!("Found routable peer {:?}", peer); - } + }, KademliaEvent::PendingRoutablePeer { peer, address: _ } => { debug!("Found pending routable peer {:?}", peer); - } + }, KademliaEvent::UnroutablePeer { peer } => { debug!("Found unroutable peer {:?}", peer); - } + }, KademliaEvent::RoutingUpdated { peer: _, is_new_peer: _, @@ -513,13 +513,13 @@ impl DHTBehaviour { old_peer: _, } => { debug!("Routing table updated"); - } + }, e @ KademliaEvent::OutboundQueryProgressed { .. 
} => { debug!("Not handling dht event {:?}", e); - } + }, e => { debug!("New unhandled swarm event: {e:?}"); - } + }, } None } diff --git a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs index cd927c2470..2c89cc741a 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/dht/store/persistent.rs @@ -281,10 +281,10 @@ impl PersistentStore { .await .map_err(|_| anyhow::anyhow!("save operation timed out")) { - Ok(Ok(())) => {} + Ok(Ok(())) => {}, Ok(Err(error)) | Err(error) => { warn!("Failed to save DHT to persistent storage: {error}"); - } + }, }; // Reset the record delta @@ -324,10 +324,10 @@ impl PersistentStore { err ); } - } + }, Err(err) => { warn!("Failed to parse record from persistent storage: {:?}", err); - } + }, }; } diff --git a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs index 72d378a587..dfd8e5ca4f 100644 --- a/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs +++ b/hotshot-libp2p-networking/src/network/behaviours/direct_message.rs @@ -59,7 +59,7 @@ impl DMBehaviour { } => { error!("Inbound message failure from {:?}: {:?}", peer, error); None - } + }, Event::OutboundFailure { peer, request_id, @@ -83,7 +83,7 @@ impl DMBehaviour { } } None - } + }, Event::Message { message, peer, .. } => match message { Message::Request { request: msg, @@ -94,7 +94,7 @@ impl DMBehaviour { // receiver, not initiator. // don't track. If we are disconnected, sender will reinitiate Some(NetworkEvent::DirectRequest(msg, peer, channel)) - } + }, Message::Response { request_id, response: msg, @@ -107,12 +107,12 @@ impl DMBehaviour { warn!("Received response for unknown request id {:?}", request_id); None } - } + }, }, e @ Event::ResponseSent { .. } => { debug!("Response sent {:?}", e); None - } + }, } } } diff --git a/hotshot-libp2p-networking/src/network/cbor.rs b/hotshot-libp2p-networking/src/network/cbor.rs index a8ca6afedf..71f19281e7 100644 --- a/hotshot-libp2p-networking/src/network/cbor.rs +++ b/hotshot-libp2p-networking/src/network/cbor.rs @@ -126,19 +126,19 @@ fn decode_into_io_error(err: cbor4ii::serde::DecodeError) -> io::Err match err { cbor4ii::serde::DecodeError::Core(DecodeError::Read(e)) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Unsupported { .. }) => { io::Error::new(io::ErrorKind::Unsupported, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e @ DecodeError::Eof { .. 
}) => { io::Error::new(io::ErrorKind::UnexpectedEof, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Core(e) => { io::Error::new(io::ErrorKind::InvalidData, e.to_string()) - } + }, cbor4ii::serde::DecodeError::Custom(e) => { io::Error::new(io::ErrorKind::Other, e.to_string()) - } + }, } } diff --git a/hotshot-libp2p-networking/src/network/node.rs b/hotshot-libp2p-networking/src/network/node.rs index 28d009f846..d5e1703a0e 100644 --- a/hotshot-libp2p-networking/src/network/node.rs +++ b/hotshot-libp2p-networking/src/network/node.rs @@ -360,7 +360,7 @@ impl NetworkNode { query.progress = DHTProgress::NotStarted; query.backoff.start_next(false); error!("Error publishing to DHT: {e:?} for peer {:?}", self.peer_id); - } + }, Ok(qid) => { debug!("Published record to DHT with qid {:?}", qid); let query = KadPutQuery { @@ -368,7 +368,7 @@ impl NetworkNode { ..query }; self.dht_handler.put_record(qid, query); - } + }, } } @@ -392,20 +392,20 @@ impl NetworkNode { ClientRequest::BeginBootstrap => { debug!("Beginning Libp2p bootstrap"); let _ = self.swarm.behaviour_mut().dht.bootstrap(); - } + }, ClientRequest::LookupPeer(pid, chan) => { let id = self.swarm.behaviour_mut().dht.get_closest_peers(pid); self.dht_handler .in_progress_get_closest_peers .insert(id, chan); - } + }, ClientRequest::GetRoutingTable(chan) => { self.dht_handler .print_routing_table(&mut self.swarm.behaviour_mut().dht); if chan.send(()).is_err() { warn!("Tried to notify client but client not tracking anymore"); } - } + }, ClientRequest::PutDHT { key, value, notify } => { let query = KadPutQuery { progress: DHTProgress::NotStarted, @@ -415,17 +415,17 @@ impl NetworkNode { backoff: ExponentialBackoff::default(), }; self.put_record(query); - } + }, ClientRequest::GetConnectedPeerNum(s) => { if s.send(self.num_connected()).is_err() { error!("error sending peer number to client"); } - } + }, ClientRequest::GetConnectedPeers(s) => { if s.send(self.connected_pids()).is_err() { error!("error sending peer set to client"); } - } + }, ClientRequest::GetDHT { key, notify, @@ -439,20 +439,20 @@ impl NetworkNode { retry_count, &mut self.swarm.behaviour_mut().dht, ); - } + }, ClientRequest::IgnorePeers(_peers) => { // NOTE used by test with conductor only - } + }, ClientRequest::Shutdown => { if let Some(listener_id) = self.listener_id { self.swarm.remove_listener(listener_id); } return Ok(true); - } + }, ClientRequest::GossipMsg(topic, contents) => { behaviour.publish_gossip(Topic::new(topic.clone()), contents.clone()); - } + }, ClientRequest::Subscribe(t, chan) => { behaviour.subscribe_gossip(&t); if let Some(chan) = chan { @@ -460,7 +460,7 @@ impl NetworkNode { error!("finished subscribing but response channel dropped"); } } - } + }, ClientRequest::Unsubscribe(t, chan) => { behaviour.unsubscribe_gossip(&t); if let Some(chan) = chan { @@ -468,7 +468,7 @@ impl NetworkNode { error!("finished unsubscribing but response channel dropped"); } } - } + }, ClientRequest::DirectRequest { pid, contents, @@ -483,23 +483,23 @@ impl NetworkNode { retry_count, }; self.direct_message_state.add_direct_request(req, id); - } + }, ClientRequest::DirectResponse(chan, msg) => { behaviour.add_direct_response(chan, msg); - } + }, ClientRequest::AddKnownPeers(peers) => { self.add_known_peers(&peers); - } + }, ClientRequest::Prune(pid) => { if self.swarm.disconnect_peer_id(pid).is_err() { warn!("Could not disconnect from {:?}", pid); } - } + }, } - } + }, None => { error!("Error receiving msg in main behaviour loop: channel closed"); - } + }, } Ok(false) } @@ 
-541,7 +541,7 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, SwarmEvent::ConnectionClosed { connection_id: _, peer_id, @@ -565,13 +565,13 @@ impl NetworkNode { send_to_client .send(NetworkEvent::ConnectedPeersUpdate(self.num_connected())) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; - } + }, SwarmEvent::Dialing { peer_id, connection_id: _, } => { debug!("Attempting to dial {:?}", peer_id); - } + }, SwarmEvent::ListenerClosed { listener_id: _, addresses: _, @@ -591,7 +591,7 @@ impl NetworkNode { connection_id: _, local_addr: _, send_back_addr: _, - } => {} + } => {}, SwarmEvent::Behaviour(b) => { let maybe_event = match b { NetworkEventInternal::DHTEvent(e) => self @@ -621,7 +621,7 @@ impl NetworkNode { } } None - } + }, NetworkEventInternal::GossipEvent(e) => match *e { GossipEvent::Message { propagation_source: _peer_id, @@ -631,25 +631,25 @@ impl NetworkNode { GossipEvent::Subscribed { peer_id, topic } => { debug!("Peer {:?} subscribed to topic {:?}", peer_id, topic); None - } + }, GossipEvent::Unsubscribed { peer_id, topic } => { debug!("Peer {:?} unsubscribed from topic {:?}", peer_id, topic); None - } + }, GossipEvent::GossipsubNotSupported { peer_id } => { warn!("Peer {:?} does not support gossipsub", peer_id); None - } + }, }, NetworkEventInternal::DMEvent(e) => self .direct_message_state .handle_dm_event(e, self.resend_tx.clone()), NetworkEventInternal::AutonatEvent(e) => { match e { - autonat::Event::InboundProbe(_) => {} + autonat::Event::InboundProbe(_) => {}, autonat::Event::OutboundProbe(e) => match e { autonat::OutboundProbeEvent::Request { .. } - | autonat::OutboundProbeEvent::Response { .. } => {} + | autonat::OutboundProbeEvent::Response { .. } => {}, autonat::OutboundProbeEvent::Error { probe_id: _, peer, @@ -659,14 +659,14 @@ impl NetworkNode { "AutoNAT Probe failed to peer {:?} with error: {:?}", peer, error ); - } + }, }, autonat::Event::StatusChanged { old, new } => { debug!("AutoNAT Status changed. 
Old: {:?}, New: {:?}", old, new); - } + }, }; None - } + }, }; if let Some(event) = maybe_event { @@ -675,14 +675,14 @@ impl NetworkNode { .send(event) .map_err(|err| NetworkError::ChannelSendError(err.to_string()))?; } - } + }, SwarmEvent::OutgoingConnectionError { connection_id: _, peer_id, error, } => { warn!("Outgoing connection error to {:?}: {:?}", peer_id, error); - } + }, SwarmEvent::IncomingConnectionError { connection_id: _, local_addr: _, @@ -690,29 +690,29 @@ impl NetworkNode { error, } => { warn!("Incoming connection error: {:?}", error); - } + }, SwarmEvent::ListenerError { listener_id: _, error, } => { warn!("Listener error: {:?}", error); - } + }, SwarmEvent::ExternalAddrConfirmed { address } => { let my_id = *self.swarm.local_peer_id(); self.swarm .behaviour_mut() .dht .add_address(&my_id, address.clone()); - } + }, SwarmEvent::NewExternalAddrOfPeer { peer_id, address } => { self.swarm .behaviour_mut() .dht .add_address(&peer_id, address.clone()); - } + }, _ => { debug!("Unhandled swarm event {:?}", event); - } + }, } Ok(()) } diff --git a/hotshot-libp2p-networking/src/network/transport.rs b/hotshot-libp2p-networking/src/network/transport.rs index 01e94e6b90..b69c2c9019 100644 --- a/hotshot-libp2p-networking/src/network/transport.rs +++ b/hotshot-libp2p-networking/src/network/transport.rs @@ -358,7 +358,7 @@ where local_addr, send_back_addr, } - } + }, // We need to re-map the other events because we changed the type of the upgrade TransportEvent::AddressExpired { @@ -377,7 +377,7 @@ where }, TransportEvent::ListenerError { listener_id, error } => { TransportEvent::ListenerError { listener_id, error } - } + }, TransportEvent::NewAddress { listener_id, listen_addr, diff --git a/hotshot-macros/src/lib.rs b/hotshot-macros/src/lib.rs index 3608ef6da1..f318d8e1b0 100644 --- a/hotshot-macros/src/lib.rs +++ b/hotshot-macros/src/lib.rs @@ -118,7 +118,7 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Type::Path(p) => p.to_lower_snake_str(), _ => { panic!("Unexpected type for GenericArgument::Type: {t:?}"); - } + }, }, syn::GenericArgument::Const(c) => match c { syn::Expr::Lit(l) => match &l.lit { @@ -126,15 +126,15 @@ impl ToLowerSnakeStr for syn::GenericArgument { syn::Lit::Int(v) => format!("{}_", v.base10_digits()), _ => { panic!("Unexpected type for GenericArgument::Const::Lit: {l:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument::Const: {c:?}"); - } + }, }, _ => { panic!("Unexpected type for GenericArgument: {self:?}"); - } + }, } } } diff --git a/hotshot-orchestrator/src/client.rs b/hotshot-orchestrator/src/client.rs index de167ff505..3c1d8e0884 100644 --- a/hotshot-orchestrator/src/client.rs +++ b/hotshot-orchestrator/src/client.rs @@ -515,7 +515,7 @@ impl OrchestratorClient { Err(err) => { tracing::info!("{err}"); sleep(Duration::from_millis(250)).await; - } + }, } } } diff --git a/hotshot-query-service/src/api.rs b/hotshot-query-service/src/api.rs index 5f447fbb53..25fe74c5f0 100644 --- a/hotshot-query-service/src/api.rs +++ b/hotshot-query-service/src/api.rs @@ -40,7 +40,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/hotshot-query-service/src/availability.rs b/hotshot-query-service/src/availability.rs index d1dbcd249f..404907f93a 100644 --- a/hotshot-query-service/src/availability.rs +++ b/hotshot-query-service/src/availability.rs @@ -527,7 +527,7 @@ where .context(FetchTransactionSnafu { resource: 
hash.to_string(), }) - } + }, None => { let height: u64 = req.integer_param("height")?; let fetch = state @@ -543,7 +543,7 @@ where .context(InvalidTransactionIndexSnafu { height, index: i })?; TransactionQueryData::new(&block, index, i) .context(InvalidTransactionIndexSnafu { height, index: i }) - } + }, } } .boxed() @@ -657,7 +657,7 @@ mod test { let leaf = client.get(&format!("leaf/{}", i)).send().await.unwrap(); blocks.push((leaf, block)); } - } + }, Err(Error::Availability { source: super::Error::FetchBlock { .. }, }) => { @@ -665,7 +665,7 @@ mod test { "found end of ledger at height {i}, non-empty blocks are {blocks:?}", ); return (i, blocks); - } + }, Err(err) => panic!("unexpected error {}", err), } } diff --git a/hotshot-query-service/src/data_source/fetching.rs b/hotshot-query-service/src/data_source/fetching.rs index e98fa236df..28a44fe34c 100644 --- a/hotshot-query-service/src/data_source/fetching.rs +++ b/hotshot-query-service/src/data_source/fetching.rs @@ -467,15 +467,15 @@ where match storage.prune(&mut pruner).await { Ok(Some(height)) => { tracing::warn!("Pruned to height {height}"); - } + }, Ok(None) => { tracing::warn!("pruner run complete."); break; - } + }, Err(e) => { tracing::error!("pruner run failed: {e:?}"); break; - } + }, } } } @@ -977,7 +977,7 @@ where ?req, "unable to fetch object; spawning a task to retry: {err:#}" ); - } + }, } // We'll use this channel to get the object back if we successfully load it on retry. @@ -1005,14 +1005,14 @@ where tracing::info!(?req, "object was ready after retries"); send.send(obj).ok(); break; - } + }, Ok(None) => { // The object was not immediately available after all, but we have // successfully spawned a fetch for it if possible. The spawned fetch // will notify the original request once it completes. tracing::info!(?req, "spawned fetch after retries"); break; - } + }, Err(err) => { tracing::warn!( ?req, @@ -1023,7 +1023,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1058,12 +1058,12 @@ where tracing::debug!(?req, "object missing from local storage, will try to fetch"); self.fetch::(&mut tx, req).await?; Ok(None) - } + }, Err(err) => { // An error occurred while querying the database. We don't know if we need to fetch // the object or not. Return an error so we can try again. bail!("failed to fetch resource {req:?} from local storage: {err:#}"); - } + }, } } @@ -1224,13 +1224,13 @@ where None => passive(T::Request::from(chunk.start + i), passive_fetch), }) .collect(); - } + }, Err(err) => { tracing::warn!( ?chunk, "unable to fetch chunk; spawning a task to retry: {err:#}" ); - } + }, } // We'll use these channels to get the objects back that we successfully load on retry. 
@@ -1272,7 +1272,7 @@ where } } break; - } + }, Err(err) => { tracing::warn!( ?chunk, @@ -1283,7 +1283,7 @@ where if let Some(next_delay) = backoff.next_backoff() { delay = next_delay; } - } + }, } } } @@ -1432,7 +1432,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; let heights = match Heights::load(&mut tx).await { Ok(heights) => heights, @@ -1443,7 +1443,7 @@ where backoff = min(2 * backoff, max_backoff); metrics.backoff.set(backoff.as_secs() as usize); continue; - } + }, }; metrics.retries.set(0); break heights; @@ -1577,7 +1577,7 @@ where tracing::error!("unable to open read tx: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, }; match tx.load_prev_aggregate().await { Ok(agg) => break agg, @@ -1585,7 +1585,7 @@ where tracing::error!("unable to load previous aggregate: {err:#}"); sleep(Duration::from_secs(5)).await; continue; - } + }, } }; @@ -1629,7 +1629,7 @@ where match res { Ok(()) => { break; - } + }, Err(err) => { tracing::warn!( num_blocks, @@ -1637,7 +1637,7 @@ where "failed to update aggregates for chunk: {err:#}" ); sleep(Duration::from_secs(1)).await; - } + }, } } metrics.height.set(height as usize); @@ -2201,7 +2201,7 @@ impl ResultExt for Result { "error loading resource from local storage, will try to fetch: {err:#}" ); None - } + }, } } } @@ -2320,7 +2320,7 @@ where // dropped. If this happens, things are very broken in any case, and it is // better to panic loudly than simply block forever. panic!("notifier dropped without satisfying request {req:?}"); - } + }, } }) .boxed(), diff --git a/hotshot-query-service/src/data_source/fetching/header.rs b/hotshot-query-service/src/data_source/fetching/header.rs index 6782fb5d37..21caf2e568 100644 --- a/hotshot-query-service/src/data_source/fetching/header.rs +++ b/hotshot-query-service/src/data_source/fetching/header.rs @@ -188,14 +188,14 @@ where header.block_number() ); fetch_block_with_header(fetcher, header); - } + }, Self::VidCommon { fetcher } => { tracing::info!( "fetched leaf {}, will now fetch VID common", header.block_number() ); fetch_vid_common_with_header(fetcher, header); - } + }, } } } @@ -225,17 +225,17 @@ where Ok(header) => { callback.run(header); return Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the header wasn't there. Fall through to // fetching it. tracing::debug!(?req, "header not available locally; trying fetch"); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to fetch the // header or not. Return an error so we can try again. bail!("failed to fetch header for block {req:?}: {message}"); - } + }, } // If the header is _not_ present, we may still be able to fetch the request, but we need to @@ -245,16 +245,16 @@ where match req { BlockId::Number(n) => { fetch_leaf_with_callbacks(tx, callback.fetcher(), n.into(), [callback.into()]).await?; - } + }, BlockId::Hash(h) => { // Given only the hash, we cannot tell if the corresponding leaf actually exists, since // we don't have a corresponding header. Therefore, we will not spawn an active fetch. tracing::debug!("not fetching unknown block {h}"); - } + }, BlockId::PayloadHash(h) => { // Same as above, we don't fetch a block with a payload that is not known to exist. 
tracing::debug!("not fetching block with unknown payload {h}"); - } + }, } Ok(()) diff --git a/hotshot-query-service/src/data_source/fetching/leaf.rs b/hotshot-query-service/src/data_source/fetching/leaf.rs index 3692d01851..915ae89949 100644 --- a/hotshot-query-service/src/data_source/fetching/leaf.rs +++ b/hotshot-query-service/src/data_source/fetching/leaf.rs @@ -172,19 +172,19 @@ where callbacks, ); return Ok(()); - } + }, Err(QueryError::Missing | QueryError::NotFound) => { // We successfully queried the database, but the next leaf wasn't there. We // know for sure that based on the current state of the DB, we cannot fetch this // leaf. tracing::debug!(n, "not fetching leaf with unknown successor"); return Ok(()); - } + }, Err(QueryError::Error { message }) => { // An error occurred while querying the database. We don't know if we need to // fetch the leaf or not. Return an error so we can try again. bail!("failed to fetch successor for leaf {n}: {message}"); - } + }, }; let fetcher = fetcher.clone(); @@ -197,13 +197,13 @@ where fetcher.provider.clone(), once(LeafCallback::Leaf { fetcher }).chain(callbacks), ); - } + }, LeafId::Hash(h) => { // We don't actively fetch leaves when requested by hash, because we have no way of // knowing whether a leaf with such a hash actually exists, and we don't want to bother // peers with requests for non-existent leaves. tracing::debug!("not fetching unknown leaf {h}"); - } + }, } Ok(()) @@ -262,7 +262,7 @@ pub(super) fn trigger_fetch_for_parent( if tx.get_leaf(((height - 1) as usize).into()).await.is_ok() { return; } - } + }, Err(err) => { // If we can't open a transaction, we can't be sure that we already have the // parent, so we fall through to fetching it just to be safe. @@ -271,7 +271,7 @@ pub(super) fn trigger_fetch_for_parent( %parent, "error opening transaction to check for parent leaf: {err:#}", ); - } + }, } tracing::info!(height, %parent, "received new leaf; fetching missing parent"); @@ -369,7 +369,7 @@ impl Ord for LeafCallback { (Self::Continuation { callback: cb1 }, Self::Continuation { callback: cb2 }) => { cb1.cmp(cb2) - } + }, _ => Ordering::Equal, } } @@ -396,7 +396,7 @@ where // Trigger a fetch of the parent leaf, if we don't already have it. trigger_fetch_for_parent(&fetcher, &leaf); fetcher.store_and_notify(leaf).await; - } + }, Self::Continuation { callback } => callback.run(leaf.leaf.block_header().clone()), } } diff --git a/hotshot-query-service/src/data_source/storage/fail_storage.rs b/hotshot-query-service/src/data_source/storage/fail_storage.rs index 8398e8c303..66c3dac169 100644 --- a/hotshot-query-service/src/data_source/storage/fail_storage.rs +++ b/hotshot-query-service/src/data_source/storage/fail_storage.rs @@ -87,8 +87,8 @@ impl FailureMode { match self { Self::Once(fail_action) if fail_action.matches(action) => { *self = Self::Never; - } - Self::Always(fail_action) if fail_action.matches(action) => {} + }, + Self::Always(fail_action) if fail_action.matches(action) => {}, _ => return Ok(()), } diff --git a/hotshot-query-service/src/data_source/storage/fs.rs b/hotshot-query-service/src/data_source/storage/fs.rs index 3e58a04911..aa75361a6b 100644 --- a/hotshot-query-service/src/data_source/storage/fs.rs +++ b/hotshot-query-service/src/data_source/storage/fs.rs @@ -88,10 +88,10 @@ where BlockId::Number(n) => Ok(n), BlockId::Hash(h) => { Ok(*self.index_by_block_hash.get(&h).context(NotFoundSnafu)? as usize) - } + }, BlockId::PayloadHash(h) => { Ok(*self.index_by_payload_hash.get(&h).context(NotFoundSnafu)? 
as usize) - } + }, } } @@ -405,11 +405,11 @@ where iter.nth(n - 1); } n - } + }, Bound::Excluded(n) => { iter.nth(n); n + 1 - } + }, Bound::Unbounded => 0, }; @@ -662,10 +662,10 @@ fn update_index_by_hash(index: &mut HashMap, hash: H // Overwrite the existing entry if the new object was sequenced first. e.insert(pos); } - } + }, Entry::Vacant(e) => { e.insert(pos); - } + }, } } @@ -772,7 +772,7 @@ where // entry in `index_by_time` has a non-empty list associated with it, so this // indexing is safe. blocks[0] - } + }, } as usize; let mut res = TimeWindowQueryData::default(); diff --git a/hotshot-query-service/src/data_source/storage/sql.rs b/hotshot-query-service/src/data_source/storage/sql.rs index b12cb5efbb..8be0ffca27 100644 --- a/hotshot-query-service/src/data_source/storage/sql.rs +++ b/hotshot-query-service/src/data_source/storage/sql.rs @@ -577,11 +577,11 @@ impl SqlStorage { match runner.run_async(&mut Migrator::from(&mut conn)).await { Ok(report) => { tracing::info!("ran DB migrations: {report:?}"); - } + }, Err(err) => { tracing::error!("DB migrations failed: {:?}", err.report()); Err(err)?; - } + }, } } @@ -709,7 +709,7 @@ impl PruneStorage for SqlStorage { }; height - } + }, }; // Prune data exceeding target retention in batches diff --git a/hotshot-query-service/src/data_source/storage/sql/queries.rs b/hotshot-query-service/src/data_source/storage/sql/queries.rs index 696aca5ba3..6263f681d6 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries.rs @@ -137,20 +137,20 @@ impl QueryBuilder<'_> { match range.start_bound() { Bound::Included(n) => { bounds.push(format!("{column} >= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { bounds.push(format!("{column} > {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } match range.end_bound() { Bound::Included(n) => { bounds.push(format!("{column} <= {}", self.bind(*n as i64)?)); - } + }, Bound::Excluded(n) => { bounds.push(format!("{column} < {}", self.bind(*n as i64)?)); - } - Bound::Unbounded => {} + }, + Bound::Unbounded => {}, } let mut where_clause = bounds.join(" AND "); diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs index 14fb489a00..0e1fef4c6f 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/explorer.rs @@ -282,7 +282,7 @@ where let query_stmt = match request.target { BlockIdentifier::Latest => { query(&GET_BLOCK_SUMMARIES_QUERY_FOR_LATEST).bind(request.num_blocks.get() as i64) - } + }, BlockIdentifier::Height(height) => query(&GET_BLOCK_SUMMARIES_QUERY_FOR_HEIGHT) .bind(height as i64) .bind(request.num_blocks.get() as i64), @@ -305,10 +305,10 @@ where BlockIdentifier::Latest => query(&GET_BLOCK_DETAIL_QUERY_FOR_LATEST), BlockIdentifier::Height(height) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HEIGHT).bind(height as i64) - } + }, BlockIdentifier::Hash(hash) => { query(&GET_BLOCK_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_result = query_stmt.fetch_one(self.as_mut()).await?; @@ -375,7 +375,7 @@ where TransactionSummaryFilter::Block(block) => { query(&GET_TRANSACTION_SUMMARIES_QUERY_FOR_BLOCK).bind(*block as i64) - } + }, }; let block_stream = query_stmt @@ -432,10 +432,10 @@ where query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HEIGHT_AND_OFFSET) .bind(height as i64) .bind(offset 
as i64) - } + }, TransactionIdentifier::Hash(hash) => { query(&GET_TRANSACTION_DETAIL_QUERY_FOR_HASH).bind(hash.to_string()) - } + }, }; let query_row = query_stmt.fetch_one(self.as_mut()).await?; @@ -455,7 +455,7 @@ where key: format!("at {height} and {offset}"), }), ) - } + }, TransactionIdentifier::Hash(hash) => txns .into_iter() .enumerate() diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs index 326d29e695..bdae5c8629 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/node.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/node.rs @@ -50,11 +50,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. Ok(height as usize + 1) - } + }, (None,) => { // If there are no blocks yet, the height is 0. Ok(0) - } + }, } } @@ -174,11 +174,11 @@ where // The height of the block is the number of blocks below it, so the total number of // blocks is one more than the height of the highest block. height as usize + 1 - } + }, None => { // If there are no blocks yet, the height is 0. 0 - } + }, }; let total_leaves = row.get::("total_leaves") as usize; let null_payloads = row.get::("null_payloads") as usize; @@ -216,7 +216,7 @@ where // sufficient data to answer the query is not as simple as just trying `load_header` // for a specific block ID. return self.time_window::(t, end, limit).await; - } + }, WindowStart::Height(h) => h, WindowStart::Hash(h) => self.load_header::(h).await?.block_number(), }; @@ -479,7 +479,7 @@ async fn aggregate_range_bounds( return Ok(None); } height - 1 - } + }, }; Ok(Some((from, to))) } diff --git a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs index 4e3d41d141..f36c7eaf51 100644 --- a/hotshot-query-service/src/data_source/storage/sql/queries/state.rs +++ b/hotshot-query-service/src/data_source/storage/sql/queries/state.rs @@ -148,7 +148,7 @@ where .decode_error("malformed merkle node value")?, children: child_nodes, }); - } + }, // If it has an entry, it's a leaf (None, None, Some(index), Some(entry)) => { proof_path.push_back(MerkleNode::Leaf { @@ -159,16 +159,16 @@ where elem: serde_json::from_value(entry.clone()) .decode_error("malformed merkle element")?, }); - } + }, // Otherwise, it's empty. (None, None, Some(_), None) => { proof_path.push_back(MerkleNode::Empty); - } + }, _ => { return Err(QueryError::Error { message: "Invalid type of merkle node found".to_string(), }); - } + }, } } } @@ -223,7 +223,7 @@ where State::Digest::digest(&data).map_err(|err| QueryError::Error { message: format!("failed to update digest: {err:#}"), }) - } + }, MerkleNode::Empty => Ok(init), _ => Err(QueryError::Error { message: "Invalid type of Node in the proof".to_string(), @@ -292,7 +292,7 @@ impl Transaction { .await?; (height, commit) - } + }, Snapshot::Index(created) => { let created = created as i64; let (commit,) = query_as::<(String,)>(&format!( @@ -307,7 +307,7 @@ impl Transaction { let commit = serde_json::from_value(commit.into()) .decode_error("malformed state commitment")?; (created, commit) - } + }, }; // Make sure the requested snapshot is up to date. 
diff --git a/hotshot-query-service/src/data_source/storage/sql/transaction.rs b/hotshot-query-service/src/data_source/storage/sql/transaction.rs index f7443e36a7..4daa390acc 100644 --- a/hotshot-query-service/src/data_source/storage/sql/transaction.rs +++ b/hotshot-query-service/src/data_source/storage/sql/transaction.rs @@ -681,10 +681,10 @@ impl, const ARITY: usize> [0_u8; 32].to_vec(), )); hashset.insert([0_u8; 32].to_vec()); - } + }, MerkleNode::ForgettenSubtree { .. } => { bail!("Node in the Merkle path contains a forgetten subtree"); - } + }, MerkleNode::Leaf { value, pos, elem } => { let mut leaf_commit = Vec::new(); // Serialize the leaf node hash value into a vector @@ -711,7 +711,7 @@ impl, const ARITY: usize> )); hashset.insert(leaf_commit); - } + }, MerkleNode::Branch { value, children } => { // Get hash let mut branch_hash = Vec::new(); @@ -728,7 +728,7 @@ impl, const ARITY: usize> match child { MerkleNode::Empty => { children_bitvec.push(false); - } + }, MerkleNode::Branch { value, .. } | MerkleNode::Leaf { value, .. } | MerkleNode::ForgettenSubtree { value } => { @@ -740,7 +740,7 @@ impl, const ARITY: usize> children_values.push(hash); // Mark the entry as 1 in bitvec to indicate a non-empty child children_bitvec.push(true); - } + }, } } @@ -758,7 +758,7 @@ impl, const ARITY: usize> )); hashset.insert(branch_hash); hashset.extend(children_values); - } + }, } // advance the traversal path for the internal nodes at each iteration diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index f78d1c28f0..451d3fe3aa 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -109,7 +109,7 @@ where "inconsistent leaf; cannot append leaf information: {err:#}" ); return Err(leaf2.block_header().block_number()); - } + }, }; let block_data = leaf2 .block_payload() @@ -141,12 +141,12 @@ where Err(err) => { tracing::warn!("failed to compute genesis VID: {err:#}"); (None, None) - } + }, } } else { (None, None) } - } + }, }; if vid_common.is_none() { @@ -188,13 +188,11 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), VidShare::V0(disperse.shares.remove(0)), )) - } + }, VidCommitment::V1(commit) => { let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; - tracing::error!(">>>0"); let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode()); - tracing::error!(">>>1"); let (calculated_commit, mut shares) = AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap(); @@ -209,7 +207,7 @@ fn genesis_vid( VidCommonQueryData::new(leaf.block_header().clone(), None), VidShare::V1(shares.remove(0)), )) - } + }, } } diff --git a/hotshot-query-service/src/explorer.rs b/hotshot-query-service/src/explorer.rs index ef259a83e5..a1db6b72cf 100644 --- a/hotshot-query-service/src/explorer.rs +++ b/hotshot-query-service/src/explorer.rs @@ -308,7 +308,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }, @@ -341,7 +341,7 @@ where ) { (Ok(Some(height)), Ok(Some(offset)), _) => { TransactionIdentifier::HeightAndOffset(height, offset) - } + }, (_, _, Ok(Some(hash))) => TransactionIdentifier::Hash(hash), _ => TransactionIdentifier::Latest, }; diff --git 
a/hotshot-query-service/src/explorer/errors.rs b/hotshot-query-service/src/explorer/errors.rs index 90c6a0ac1d..5908b4d787 100644 --- a/hotshot-query-service/src/explorer/errors.rs +++ b/hotshot-query-service/src/explorer/errors.rs @@ -417,7 +417,7 @@ mod test { let want = query_error; match &have.error { - crate::QueryError::NotFound => {} + crate::QueryError::NotFound => {}, _ => panic!("deserialized QueryError mismatch: have: {have}, want: {want}"), } } diff --git a/hotshot-query-service/src/explorer/monetary_value.rs b/hotshot-query-service/src/explorer/monetary_value.rs index 399e18e021..b30b63f513 100644 --- a/hotshot-query-service/src/explorer/monetary_value.rs +++ b/hotshot-query-service/src/explorer/monetary_value.rs @@ -195,7 +195,7 @@ where return Err(E::custom( "no non-breaking space found in expected MonetaryValue", )) - } + }, }; let first: String = value.chars().take(index).collect(); @@ -244,7 +244,7 @@ fn determine_pre_and_post_decimal_strings(value: &str) -> (String, Option { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; @@ -436,7 +436,7 @@ mod test { let result = match result { Err(err) => { panic!("{} failed to parse: {}", value, err); - } + }, Ok(result) => result, }; diff --git a/hotshot-query-service/src/explorer/query_data.rs b/hotshot-query-service/src/explorer/query_data.rs index 7b4f0b3de7..8493ef1542 100644 --- a/hotshot-query-service/src/explorer/query_data.rs +++ b/hotshot-query-service/src/explorer/query_data.rs @@ -79,7 +79,7 @@ impl Display for TransactionIdentifier { TransactionIdentifier::Latest => write!(f, "latest"), TransactionIdentifier::HeightAndOffset(height, offset) => { write!(f, "{} {}", height, offset) - } + }, TransactionIdentifier::Hash(hash) => write!(f, "{}", hash), } } diff --git a/hotshot-query-service/src/fetching.rs b/hotshot-query-service/src/fetching.rs index 427f482553..0490bebd02 100644 --- a/hotshot-query-service/src/fetching.rs +++ b/hotshot-query-service/src/fetching.rs @@ -122,12 +122,12 @@ impl Fetcher { e.get_mut().extend(callbacks); tracing::info!(?req, callbacks = ?e.get(), "resource is already being fetched"); return; - } + }, Entry::Vacant(e) => { // If the object is not being fetched, we will register our own callback and // then fetch it ourselves. 
e.insert(callbacks.into_iter().collect()); - } + }, } } diff --git a/hotshot-query-service/src/fetching/provider/any.rs b/hotshot-query-service/src/fetching/provider/any.rs index 0896abdce4..f3dde2f568 100644 --- a/hotshot-query-service/src/fetching/provider/any.rs +++ b/hotshot-query-service/src/fetching/provider/any.rs @@ -191,7 +191,7 @@ where providers.len() ); continue; - } + }, } } diff --git a/hotshot-query-service/src/fetching/provider/query_service.rs b/hotshot-query-service/src/fetching/provider/query_service.rs index dbf09aa900..f6f65878db 100644 --- a/hotshot-query-service/src/fetching/provider/query_service.rs +++ b/hotshot-query-service/src/fetching/provider/query_service.rs @@ -79,7 +79,7 @@ where Err(err) => { tracing::error!(%err, "unable to compute VID commitment"); return None; - } + }, }, ); if commit != req.0 { @@ -91,11 +91,11 @@ where } Some(payload.data) - } + }, Err(err) => { tracing::error!("failed to fetch payload {req:?}: {err}"); None - } + }, } } } @@ -134,11 +134,11 @@ where leaf.leaf.unfill_block_payload(); Some(leaf) - } + }, Err(err) => { tracing::error!("failed to fetch leaf {req:?}: {err}"); None - } + }, } } } @@ -171,18 +171,18 @@ where tracing::error!(?req, ?res, "Expect VID common data but found None"); None } - } + }, VidCommitment::V1(_) => { if res.common.is_some() { tracing::warn!(?req, ?res, "Expect no VID common data but found some.") } None - } + }, }, Err(err) => { tracing::error!("failed to fetch VID common {req:?}: {err}"); None - } + }, } } } @@ -1201,7 +1201,7 @@ mod test { .as_ref() .fail_begins_writable(FailableAction::Any) .await - } + }, FailureType::Write => data_source.as_ref().fail_writes(FailableAction::Any).await, FailureType::Commit => data_source.as_ref().fail_commits(FailableAction::Any).await, } @@ -1304,19 +1304,19 @@ mod test { .as_ref() .fail_one_begin_writable(FailableAction::Any) .await - } + }, FailureType::Write => { data_source .as_ref() .fail_one_write(FailableAction::Any) .await - } + }, FailureType::Commit => { data_source .as_ref() .fail_one_commit(FailableAction::Any) .await - } + }, } assert_eq!(leaves[0], data_source.get_leaf(1).await.await); @@ -1882,7 +1882,7 @@ mod test { for (leaf, payload) in leaves.iter().zip(payloads) { assert_eq!(payload.block_hash, leaf.block_hash()); } - } + }, MetadataType::Vid => { let vids = data_source.subscribe_vid_common_metadata(1).await.take(3); @@ -1895,7 +1895,7 @@ mod test { for (leaf, vid) in leaves.iter().zip(vids) { assert_eq!(vid.block_hash, leaf.block_hash()); } - } + }, } } diff --git a/hotshot-stake-table/src/mt_based.rs b/hotshot-stake-table/src/mt_based.rs index a6804662be..f96cf53a58 100644 --- a/hotshot-stake-table/src/mt_based.rs +++ b/hotshot-stake-table/src/mt_based.rs @@ -100,7 +100,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.simple_lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -116,7 +116,7 @@ impl StakeTableScheme for StakeTable { Some(index) => { let branches = to_merkle_path(*index, self.height); root.lookup(self.height, &branches) - } + }, None => Err(StakeTableError::KeyNotFound), }?; let amount = *proof.value().ok_or(StakeTableError::KeyNotFound)?; @@ -149,7 +149,7 @@ impl StakeTableScheme for StakeTable { negative, )?; Ok(value) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -221,7 +221,7 @@ impl StakeTable { value, )?; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git 
a/hotshot-stake-table/src/mt_based/internal.rs b/hotshot-stake-table/src/mt_based/internal.rs index b08301ee30..d28233f2e7 100644 --- a/hotshot-stake-table/src/mt_based/internal.rs +++ b/hotshot-stake-table/src/mt_based/internal.rs @@ -155,10 +155,10 @@ impl MerkleProof { let comm = Digest::evaluate(input) .map_err(|_| StakeTableError::RescueError)?[0]; Ok(comm) - } + }, MerklePathEntry::Leaf { .. } => Err(StakeTableError::MalformedProof), }) - } + }, _ => Err(StakeTableError::MalformedProof), } } @@ -305,7 +305,7 @@ impl PersistentMerkleNode { siblings: siblings.try_into().unwrap(), }); Ok(proof) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -341,7 +341,7 @@ impl PersistentMerkleNode { ptr += 1; } children[ptr].key_by_stake(stake_number) - } + }, PersistentMerkleNode::Leaf { comm: _, key, @@ -441,7 +441,7 @@ impl PersistentMerkleNode { }), value, )) - } + }, PersistentMerkleNode::Leaf { comm: _, key: node_key, @@ -473,7 +473,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } @@ -518,7 +518,7 @@ impl PersistentMerkleNode { old_value, )) } - } + }, PersistentMerkleNode::Leaf { comm: _, key: cur_key, @@ -541,7 +541,7 @@ impl PersistentMerkleNode { } else { Err(StakeTableError::MismatchedKey) } - } + }, } } } @@ -584,7 +584,7 @@ impl Iterator for IntoIter { // put the left-most child to the last, so it is visited first. self.unvisited.extend(children.into_iter().rev()); self.next() - } + }, PersistentMerkleNode::Leaf { comm: _, key, diff --git a/hotshot-stake-table/src/vec_based.rs b/hotshot-stake-table/src/vec_based.rs index 6267dd21ca..0117e302d5 100644 --- a/hotshot-stake-table/src/vec_based.rs +++ b/hotshot-stake-table/src/vec_based.rs @@ -126,7 +126,7 @@ where self.head_total_stake -= self.head.stake_amount[*pos]; self.head.stake_amount[*pos] = U256::zero(); Ok(()) - } + }, None => Err(StakeTableError::KeyNotFound), } } @@ -306,7 +306,7 @@ where self.head_total_stake -= old_value; self.head_total_stake += value; Ok(old_value) - } + }, None => Err(StakeTableError::KeyNotFound), } } diff --git a/hotshot-stake-table/src/vec_based/config.rs b/hotshot-stake-table/src/vec_based/config.rs index 4415555142..a752088765 100644 --- a/hotshot-stake-table/src/vec_based/config.rs +++ b/hotshot-stake-table/src/vec_based/config.rs @@ -41,7 +41,7 @@ impl ToFields for QCVerKey { FieldType::from_le_bytes_mod_order(&bytes[31..62]), FieldType::from_le_bytes_mod_order(&bytes[62..]), ] - } + }, Err(_) => unreachable!(), } } diff --git a/hotshot-state-prover/src/service.rs b/hotshot-state-prover/src/service.rs index 60a31acc31..4cabae70a1 100644 --- a/hotshot-state-prover/src/service.rs +++ b/hotshot-state-prover/src/service.rs @@ -155,12 +155,12 @@ async fn init_stake_table_from_sequencer( Err(e) => { tracing::error!("Failed to parse the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, }, Err(e) => { tracing::error!("Failed to fetch the network config: {e}"); sleep(Duration::from_secs(5)).await; - } + }, } }; @@ -288,7 +288,7 @@ pub async fn read_contract_state( Err(e) => { tracing::error!("unable to read finalized_state from contract: {}", e); return Err(ProverError::ContractError(e.into())); - } + }, }; let st_state: ParsedStakeTableState = match contract.genesis_stake_table_state().call().await { Ok(s) => s.into(), @@ -298,7 +298,7 @@ pub async fn read_contract_state( e ); return Err(ProverError::ContractError(e.into())); - } + }, }; Ok((state.into(), st_state.into())) @@ -330,10 +330,10 @@ pub async fn submit_state_and_proof( 
priority_fee ); } - } + }, Err(e) => { tracing::warn!("!! BlockNative Price Oracle failed: {}", e); - } + }, } } diff --git a/hotshot-task-impls/src/builder.rs b/hotshot-task-impls/src/builder.rs index d40d041e6d..ca5acd10ef 100644 --- a/hotshot-task-impls/src/builder.rs +++ b/hotshot-task-impls/src/builder.rs @@ -43,10 +43,10 @@ impl From for BuilderClientError { match value { BuilderApiError::Request(source) | BuilderApiError::TxnUnpack(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::TxnSubmit(source) | BuilderApiError::BuilderAddress(source) => { Self::Api(source.to_string()) - } + }, BuilderApiError::Custom { message, .. } => Self::Api(message), BuilderApiError::BlockAvailable { source, .. } | BuilderApiError::BlockClaim { source, .. } => match source { diff --git a/hotshot-task-impls/src/consensus/mod.rs b/hotshot-task-impls/src/consensus/mod.rs index 6e303ee4fd..531a074945 100644 --- a/hotshot-task-impls/src/consensus/mod.rs +++ b/hotshot-task-impls/src/consensus/mod.rs @@ -120,14 +120,14 @@ impl, V: Versions> ConsensusTaskSt { tracing::debug!("Failed to handle QuorumVoteRecv event; error = {e}"); } - } + }, HotShotEvent::TimeoutVoteRecv(ref vote) => { if let Err(e) = handle_timeout_vote_recv(vote, Arc::clone(&event), &sender, self).await { tracing::debug!("Failed to handle TimeoutVoteRecv event; error = {e}"); } - } + }, HotShotEvent::ViewChange(new_view_number, epoch_number) => { if let Err(e) = handle_view_change(*new_view_number, *epoch_number, &sender, &receiver, self) @@ -136,12 +136,12 @@ impl, V: Versions> ConsensusTaskSt tracing::trace!("Failed to handle ViewChange event; error = {e}"); } self.view_start_time = Instant::now(); - } + }, HotShotEvent::Timeout(view_number, epoch) => { if let Err(e) = handle_timeout(*view_number, *epoch, &sender, self).await { tracing::debug!("Failed to handle Timeout event; error = {e}"); } - } + }, HotShotEvent::ExtendedQc2Formed(eqc) => { let cert_view = eqc.view_number(); let cert_block_number = self @@ -168,7 +168,7 @@ impl, V: Versions> ConsensusTaskSt &sender, ) .await; - } + }, HotShotEvent::ExtendedQcRecv(high_qc, next_epoch_high_qc, _) => { if !self .consensus @@ -217,8 +217,8 @@ impl, V: Versions> ConsensusTaskSt ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) diff --git a/hotshot-task-impls/src/da.rs b/hotshot-task-impls/src/da.rs index f8bf3add36..b858923ffe 100644 --- a/hotshot-task-impls/src/da.rs +++ b/hotshot-task-impls/src/da.rs @@ -141,7 +141,7 @@ impl, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); let view_number = proposal.data.view_number(); @@ -315,7 +315,7 @@ impl, V: Versions> DaTaskState { tracing::debug!("DA vote recv, Main Task {:?}", vote.view_number()); // Check if we are the leader and the vote is from the sender. 
@@ -348,7 +348,7 @@ impl, V: Versions> DaTaskState { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -364,7 +364,7 @@ impl, V: Versions> DaTaskState { let PackedBundle:: { encoded_transactions, @@ -434,8 +434,8 @@ impl, V: Versions> DaTaskState {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/events.rs b/hotshot-task-impls/src/events.rs index a1e828f27a..f764114889 100644 --- a/hotshot-task-impls/src/events.rs +++ b/hotshot-task-impls/src/events.rs @@ -284,7 +284,7 @@ impl HotShotEvent { HotShotEvent::QuorumVoteRecv(v) => Some(v.view_number()), HotShotEvent::TimeoutVoteRecv(v) | HotShotEvent::TimeoutVoteSend(v) => { Some(v.view_number()) - } + }, HotShotEvent::QuorumProposalRecv(proposal, _) | HotShotEvent::QuorumProposalSend(proposal, _) | HotShotEvent::QuorumProposalValidated(proposal, _) @@ -292,16 +292,16 @@ impl HotShotEvent { | HotShotEvent::QuorumProposalResponseSend(_, proposal) | HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { Some(proposal.data.view_number()) - } + }, HotShotEvent::QuorumVoteSend(vote) | HotShotEvent::ExtendedQuorumVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) | HotShotEvent::DaProposalValidated(proposal, _) | HotShotEvent::DaProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::DaVoteRecv(vote) | HotShotEvent::DaVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => Some(qc.view_number()), either::Right(tc) => Some(tc.view_number()), @@ -329,7 +329,7 @@ impl HotShotEvent { | HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => Some(cert.view_number()), HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { Some(*view_number) - } + }, HotShotEvent::BlockRecv(packed_bundle) => Some(packed_bundle.view_number), HotShotEvent::Shutdown | HotShotEvent::TransactionSend(_, _) @@ -337,12 +337,12 @@ impl HotShotEvent { HotShotEvent::VidDisperseSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::VidShareRecv(_, proposal) | HotShotEvent::VidShareValidated(proposal) => { Some(proposal.data.view_number()) - } + }, HotShotEvent::UpgradeProposalRecv(proposal, _) | HotShotEvent::UpgradeProposalSend(proposal, _) => Some(proposal.data.view_number()), HotShotEvent::UpgradeVoteRecv(vote) | HotShotEvent::UpgradeVoteSend(vote) => { Some(vote.view_number()) - } + }, HotShotEvent::QuorumProposalRequestSend(req, _) | HotShotEvent::QuorumProposalRequestRecv(req, _) => Some(req.view_number), HotShotEvent::ViewChange(view_number, _) @@ -351,7 +351,7 @@ impl HotShotEvent { | HotShotEvent::Timeout(view_number, ..) 
=> Some(*view_number), HotShotEvent::DaCertificateRecv(cert) | HotShotEvent::DacSend(cert, _) => { Some(cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => Some(cert.view_number), HotShotEvent::UpgradeCertificateFormed(cert) => Some(cert.view_number()), HotShotEvent::VidRequestSend(request, _, _) @@ -378,20 +378,20 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumVoteRecv(v) => { write!(f, "QuorumVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::ExtendedQuorumVoteSend(v) => { write!( f, "ExtendedQuorumVoteSend(view_number={:?})", v.view_number() ) - } + }, HotShotEvent::TimeoutVoteRecv(v) => { write!(f, "TimeoutVoteRecv(view_number={:?})", v.view_number()) - } + }, HotShotEvent::TimeoutVoteSend(v) => { write!(f, "TimeoutVoteSend(view_number={:?})", v.view_number()) - } + }, HotShotEvent::DaProposalRecv(proposal, _) => write!( f, "DaProposalRecv(view_number={:?})", @@ -404,10 +404,10 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteRecv(vote) => { write!(f, "DaVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::DaCertificateRecv(cert) => { write!(f, "DaCertificateRecv(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DaCertificateValidated(cert) => write!( f, "DaCertificateValidated(view_number={:?})", @@ -420,7 +420,7 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumVoteSend(vote) => { write!(f, "QuorumVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => write!( f, "QuorumProposalValidated(view_number={:?})", @@ -433,7 +433,7 @@ impl Display for HotShotEvent { ), HotShotEvent::DaVoteSend(vote) => { write!(f, "DaVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::QcFormed(cert) => match cert { either::Left(qc) => write!(f, "QcFormed(view_number={:?})", qc.view_number()), either::Right(tc) => write!(f, "QcFormed(view_number={:?})", tc.view_number()), @@ -445,26 +445,26 @@ impl Display for HotShotEvent { HotShotEvent::NextEpochQc2Formed(cert) => match cert { either::Left(qc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", qc.view_number()) - } + }, either::Right(tc) => { write!(f, "NextEpochQc2Formed(view_number={:?})", tc.view_number()) - } + }, }, HotShotEvent::ExtendedQc2Formed(cert) => { write!(f, "ExtendedQc2Formed(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::DacSend(cert, _) => { write!(f, "DacSend(view_number={:?})", cert.view_number()) - } + }, HotShotEvent::ViewChange(view_number, epoch_number) => { write!( f, "ViewChange(view_number={view_number:?}, epoch_number={epoch_number:?})" ) - } + }, HotShotEvent::ViewSyncTimeout(view_number, _, _) => { write!(f, "ViewSyncTimeout(view_number={view_number:?})") - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => write!( f, "ViewSyncPreCommitVoteRecv(view_number={:?})", @@ -501,48 +501,48 @@ impl Display for HotShotEvent { "ViewSyncPreCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(cert) => { write!( f, "ViewSyncCommitCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { write!( f, "ViewSyncFinalizeCertificateRecv(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => { write!( f, "ViewSyncPreCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => { write!( f, 
"ViewSyncCommitCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => { write!( f, "ViewSyncFinalizeCertificateSend(view_number={:?})", cert.view_number() ) - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { write!(f, "ViewSyncTrigger(view_number={view_number:?})") - } + }, HotShotEvent::Timeout(view_number, epoch) => { write!(f, "Timeout(view_number={view_number:?}, epoch={epoch:?})") - } + }, HotShotEvent::TransactionsRecv(_) => write!(f, "TransactionsRecv"), HotShotEvent::TransactionSend(_, _) => write!(f, "TransactionSend"), HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { @@ -550,10 +550,10 @@ impl Display for HotShotEvent { f, "SendPayloadCommitmentAndMetadata(view_number={view_number:?})" ) - } + }, HotShotEvent::BlockRecv(packed_bundle) => { write!(f, "BlockRecv(view_number={:?})", packed_bundle.view_number) - } + }, HotShotEvent::VidDisperseSend(proposal, _) => write!( f, "VidDisperseSend(view_number={:?})", @@ -581,10 +581,10 @@ impl Display for HotShotEvent { ), HotShotEvent::UpgradeVoteRecv(vote) => { write!(f, "UpgradeVoteRecv(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeVoteSend(vote) => { write!(f, "UpgradeVoteSend(view_number={:?})", vote.view_number()) - } + }, HotShotEvent::UpgradeCertificateFormed(cert) => write!( f, "UpgradeCertificateFormed(view_number={:?})", @@ -592,63 +592,63 @@ impl Display for HotShotEvent { ), HotShotEvent::QuorumProposalRequestSend(view_number, _) => { write!(f, "QuorumProposalRequestSend(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalRequestRecv(view_number, _) => { write!(f, "QuorumProposalRequestRecv(view_number={view_number:?})") - } + }, HotShotEvent::QuorumProposalResponseSend(_, proposal) => { write!( f, "QuorumProposalResponseSend(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalResponseRecv(proposal) => { write!( f, "QuorumProposalResponseRecv(view_number={:?})", proposal.data.view_number() ) - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { write!( f, "QuorumProposalPreliminarilyValidated(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::VidRequestSend(request, _, _) => { write!(f, "VidRequestSend(view_number={:?}", request.view) - } + }, HotShotEvent::VidRequestRecv(request, _) => { write!(f, "VidRequestRecv(view_number={:?}", request.view) - } + }, HotShotEvent::VidResponseSend(_, _, proposal) => { write!( f, "VidResponseSend(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::VidResponseRecv(_, proposal) => { write!( f, "VidResponseRecv(view_number={:?}", proposal.data.view_number() ) - } + }, HotShotEvent::HighQcRecv(qc, _) => { write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::HighQcSend(qc, ..) => { write!(f, "HighQcSend(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcRecv(qc, ..) => { write!(f, "ExtendedQcRecv(view_number={:?}", qc.view_number()) - } + }, HotShotEvent::ExtendedQcSend(qc, ..) 
=> { write!(f, "ExtendedQcSend(view_number={:?}", qc.view_number()) - } + }, } } } diff --git a/hotshot-task-impls/src/helpers.rs b/hotshot-task-impls/src/helpers.rs index 5fc78869db..a3a67f8311 100644 --- a/hotshot-task-impls/src/helpers.rs +++ b/hotshot-task-impls/src/helpers.rs @@ -789,7 +789,7 @@ pub(crate) async fn validate_proposal_view_and_certs< *view_number, e ) })?; - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ensure!( view_sync_cert.view_number == view_number, @@ -813,7 +813,7 @@ pub(crate) async fn validate_proposal_view_and_certs< ) .await .context(|e| warn!("Invalid view sync finalize cert provided: {}", e))?; - } + }, } } @@ -846,13 +846,13 @@ pub async fn broadcast_event(event: E, sender: &Send "Event sender queue overflow, Oldest event removed form queue: {:?}", overflowed ); - } + }, Err(SendError(e)) => { tracing::warn!( "Event: {:?}\n Sending failed, event stream probably shutdown", e ); - } + }, } } diff --git a/hotshot-task-impls/src/network.rs b/hotshot-task-impls/src/network.rs index 8994a594fb..a7ebb87c0a 100644 --- a/hotshot-task-impls/src/network.rs +++ b/hotshot-task-impls/src/network.rs @@ -85,7 +85,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::Proposal2(proposal) => { if !self .upgrade_lock @@ -96,10 +96,10 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalRecv(convert_proposal(proposal), sender) - } + }, GeneralConsensusMessage::ProposalRequested(req, sig) => { HotShotEvent::QuorumProposalRequestRecv(req, sig) - } + }, GeneralConsensusMessage::ProposalResponse(proposal) => { if self .upgrade_lock @@ -110,7 +110,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { if !self .upgrade_lock @@ -121,21 +121,21 @@ impl NetworkMessageTaskState { return; } HotShotEvent::QuorumProposalResponseRecv(convert_proposal(proposal)) - } + }, GeneralConsensusMessage::Vote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote.to_vote2()) - } + }, GeneralConsensusMessage::Vote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received GeneralConsensusMessage::Vote2 for view {} but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::QuorumVoteRecv(vote) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote(view_sync_message) => { if self .upgrade_lock @@ -146,7 +146,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncPreCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -157,7 +157,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, ) => { @@ -172,7 +172,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncPreCommitCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate2( view_sync_message, ) => { @@ -185,7 +185,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message) - } + }, 
GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { if self .upgrade_lock @@ -196,7 +196,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(view_sync_message) => { if !self .upgrade_lock @@ -207,7 +207,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { if self .upgrade_lock @@ -218,7 +218,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message.to_vsc2()) - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(view_sync_message) => { if !self .upgrade_lock @@ -229,7 +229,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { if self .upgrade_lock @@ -240,7 +240,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message.to_vote2()) - } + }, GeneralConsensusMessage::ViewSyncFinalizeVote2(view_sync_message) => { if !self .upgrade_lock @@ -251,7 +251,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { if self .upgrade_lock @@ -264,7 +264,7 @@ impl NetworkMessageTaskState { HotShotEvent::ViewSyncFinalizeCertificateRecv( view_sync_message.to_vsc2(), ) - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2( view_sync_message, ) => { @@ -277,7 +277,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) - } + }, GeneralConsensusMessage::TimeoutVote(message) => { if self .upgrade_lock @@ -288,7 +288,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message.to_vote2()) - } + }, GeneralConsensusMessage::TimeoutVote2(message) => { if !self .upgrade_lock @@ -299,18 +299,18 @@ impl NetworkMessageTaskState { return; } HotShotEvent::TimeoutVoteRecv(message) - } + }, GeneralConsensusMessage::UpgradeProposal(message) => { HotShotEvent::UpgradeProposalRecv(message, sender) - } + }, GeneralConsensusMessage::UpgradeVote(message) => { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) - } + }, GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), GeneralConsensusMessage::ExtendedQc(qc, next_epoch_qc) => { HotShotEvent::ExtendedQcRecv(qc, next_epoch_qc, sender) - } + }, }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { @@ -323,7 +323,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) - } + }, DaConsensusMessage::DaProposal2(proposal) => { if !self .upgrade_lock @@ -334,35 +334,35 @@ impl NetworkMessageTaskState { return; } HotShotEvent::DaProposalRecv(proposal, sender) - } + }, DaConsensusMessage::DaVote(vote) => { if self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote for view {} but epochs are enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) - } + }, DaConsensusMessage::DaVote2(vote) => { if !self.upgrade_lock.epochs_enabled(vote.view_number()).await { tracing::warn!("received DaConsensusMessage::DaVote2 for view {} 
but epochs are not enabled for that view", vote.view_number()); return; } HotShotEvent::DaVoteRecv(vote.clone()) - } + }, DaConsensusMessage::DaCertificate(cert) => { if self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate for view {} but epochs are enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert.to_dac2()) - } + }, DaConsensusMessage::DaCertificate2(cert) => { if !self.upgrade_lock.epochs_enabled(cert.view_number()).await { tracing::warn!("received DaConsensusMessage::DaCertificate2 for view {} but epochs are not enabled for that view", cert.view_number()); return; } HotShotEvent::DaCertificateRecv(cert) - } + }, DaConsensusMessage::VidDisperseMsg(proposal) => { if self .upgrade_lock @@ -373,7 +373,7 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, DaConsensusMessage::VidDisperseMsg2(proposal) => { if !self .upgrade_lock @@ -384,11 +384,11 @@ impl NetworkMessageTaskState { return; } HotShotEvent::VidShareRecv(sender, convert_proposal(proposal)) - } + }, }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; - } + }, // Handle data messages MessageKind::Data(message) => match message { @@ -403,7 +403,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, DataMessage::DataResponse(response) => { if let ResponseMessage::Found(message) = response { match message { @@ -416,7 +416,7 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } + }, SequencingMessage::Da(DaConsensusMessage::VidDisperseMsg2( proposal, )) => { @@ -428,11 +428,11 @@ impl NetworkMessageTaskState { &self.internal_event_stream, ) .await; - } - _ => {} + }, + _ => {}, } } - } + }, DataMessage::RequestData(data) => { let req_data = data.clone(); if let RequestKind::Vid(_view_number, _key) = req_data.request { @@ -442,7 +442,7 @@ impl NetworkMessageTaskState { ) .await; } - } + }, }, // Handle external messages @@ -459,7 +459,7 @@ impl NetworkMessageTaskState { &self.external_event_stream, ) .await; - } + }, } } } @@ -607,7 +607,7 @@ impl< Err(e) => { tracing::error!("Failed to serialize message: {}", e); continue; - } + }, }; messages.insert(recipient, serialized_message); @@ -630,7 +630,7 @@ impl< return; } match net.vid_broadcast_message(messages).await { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message from network task: {:?}", e), } }); @@ -665,7 +665,7 @@ impl< Err(e) => { tracing::warn!("Not Sending {:?} because of storage error: {:?}", action, e); Err(()) - } + }, } } else { Ok(()) @@ -718,7 +718,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, // ED Each network task is subscribed to all these message types. 
Need filters per network task HotShotEvent::QuorumVoteSend(vote) => { @@ -740,7 +740,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -754,7 +754,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { @@ -768,7 +768,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Broadcast)) - } + }, HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), MessageKind::::from_consensus_message(SequencingMessage::General( @@ -796,11 +796,11 @@ impl< message, TransmitType::Direct(sender_key), )) - } + }, HotShotEvent::VidDisperseSend(proposal, sender) => { self.handle_vid_disperse_proposal(proposal, &sender).await; None - } + }, HotShotEvent::DaProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::DaPropose); @@ -819,7 +819,7 @@ impl< }; Some((sender, message, TransmitType::DaCommitteeBroadcast)) - } + }, HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); let view_number = vote.view_number(); @@ -839,7 +839,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -853,7 +853,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); let message = if self @@ -871,7 +871,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; let leader = match self @@ -890,7 +890,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -903,7 +903,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncCommitVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -923,7 +923,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -936,7 +936,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncFinalizeVoteSend(vote) => { *maybe_action = Some(HotShotAction::ViewSyncVote); let view_number = vote.view_number() + vote.date().relay; @@ -956,7 +956,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -969,7 +969,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -983,7 +983,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -997,7 +997,7 @@ impl< }; Some((sender, 
message, TransmitType::Broadcast)) - } + }, HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => { let view_number = certificate.view_number(); let message = if self.upgrade_lock.epochs_enabled(view_number).await { @@ -1011,7 +1011,7 @@ impl< }; Some((sender, message, TransmitType::Broadcast)) - } + }, HotShotEvent::TimeoutVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); let view_number = vote.view_number() + 1; @@ -1031,7 +1031,7 @@ impl< e ); return None; - } + }, }; let message = if self.upgrade_lock.epochs_enabled(vote.view_number()).await { MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1044,7 +1044,7 @@ impl< }; Some((vote.signing_key(), message, TransmitType::Direct(leader))) - } + }, HotShotEvent::UpgradeProposalSend(proposal, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( @@ -1071,7 +1071,7 @@ impl< e ); return None; - } + }, }; Some(( vote.signing_key(), @@ -1080,7 +1080,7 @@ impl< )), TransmitType::Direct(leader), )) - } + }, HotShotEvent::ViewChange(view, epoch) => { self.view = view; if epoch > self.epoch { @@ -1096,7 +1096,7 @@ impl< .await; }); None - } + }, HotShotEvent::VidRequestSend(req, sender, to) => Some(( sender, MessageKind::Data(DataMessage::RequestData(req)), @@ -1126,7 +1126,7 @@ impl< vid_share_proposal, )), ))) - } + }, VidDisperseShare::V1(data) => { if !epochs_enabled { tracing::warn!( @@ -1145,10 +1145,10 @@ impl< vid_share_proposal, )), ))) - } + }, }; Some((sender, message, TransmitType::Direct(to))) - } + }, HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( sender, MessageKind::Consensus(SequencingMessage::General( @@ -1234,18 +1234,18 @@ impl< Err(e) => { tracing::error!("Failed to serialize message: {}", e); return; - } + }, }; let transmit_result = match transmit { TransmitType::Direct(recipient) => { network.direct_message(serialized_message, recipient).await - } + }, TransmitType::Broadcast => { network .broadcast_message(serialized_message, committee_topic, broadcast_delay) .await - } + }, TransmitType::DaCommitteeBroadcast => { network .da_broadcast_message( @@ -1254,11 +1254,11 @@ impl< broadcast_delay, ) .await - } + }, }; match transmit_result { - Ok(()) => {} + Ok(()) => {}, Err(e) => tracing::warn!("Failed to send message task: {:?}", e), } }); diff --git a/hotshot-task-impls/src/quorum_proposal/handlers.rs b/hotshot-task-impls/src/quorum_proposal/handlers.rs index 2203370d3d..5bf2e682d8 100644 --- a/hotshot-task-impls/src/quorum_proposal/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal/handlers.rs @@ -431,22 +431,22 @@ impl HandleDepOutput for ProposalDependencyHandle< block_view: *view, auction_result: auction_result.clone(), }); - } + }, HotShotEvent::Qc2Formed(cert) => match cert { either::Right(timeout) => { timeout_certificate = Some(timeout.clone()); - } + }, either::Left(qc) => { parent_qc = Some(qc.clone()); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { view_sync_finalize_cert = Some(cert.clone()); - } + }, HotShotEvent::VidDisperseSend(share, _) => { vid_share = Some(share.clone()); - } - _ => {} + }, + _ => {}, } } diff --git a/hotshot-task-impls/src/quorum_proposal/mod.rs b/hotshot-task-impls/src/quorum_proposal/mod.rs index 88c09de09e..ba94007f99 100644 --- a/hotshot-task-impls/src/quorum_proposal/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal/mod.rs @@ -115,14 +115,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::TimeoutCert => { if let 
HotShotEvent::Qc2Formed(either::Right(timeout)) = event { timeout.view_number() + 1 } else { return false; } - } + }, ProposalDependency::ViewSyncCert => { if let HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_cert) = event { @@ -130,7 +130,7 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::Proposal => { if let HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) = event { @@ -138,7 +138,7 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::PayloadAndMetadata => { if let HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -153,14 +153,14 @@ impl, V: Versions> } else { return false; } - } + }, ProposalDependency::VidShare => { if let HotShotEvent::VidDisperseSend(vid_disperse, _) = event { vid_disperse.data.view_number() } else { return false; } - } + }, }; let valid = event_view == view_number; if valid { @@ -219,25 +219,25 @@ impl, V: Versions> match event.as_ref() { HotShotEvent::SendPayloadCommitmentAndMetadata(..) => { payload_commitment_dependency.mark_as_completed(Arc::clone(&event)); - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(..) => { proposal_dependency.mark_as_completed(event); - } + }, HotShotEvent::Qc2Formed(quorum_certificate) => match quorum_certificate { Either::Right(_) => { timeout_dependency.mark_as_completed(event); - } + }, Either::Left(_) => { qc_dependency.mark_as_completed(event); - } + }, }, HotShotEvent::ViewSyncFinalizeCertificateRecv(_) => { view_sync_dependency.mark_as_completed(event); - } + }, HotShotEvent::VidDisperseSend(_, _) => { vid_share_dependency.mark_as_completed(event); - } - _ => {} + }, + _ => {}, }; // We have three cases to consider: @@ -410,7 +410,7 @@ impl, V: Versions> self.formed_upgrade_certificate = Some(cert.clone()); } - } + }, HotShotEvent::Qc2Formed(cert) => match cert.clone() { either::Right(timeout_cert) => { let view_number = timeout_cert.view_number + 1; @@ -423,7 +423,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, either::Left(qc) => { // Only update if the qc is from a newer view if qc.view_number <= self.consensus.read().await.high_qc().view_number { @@ -462,7 +462,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, }, HotShotEvent::SendPayloadCommitmentAndMetadata( _payload_commitment, @@ -483,7 +483,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = certificate.data.epoch; let epoch_membership = self @@ -521,7 +521,7 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::QuorumProposalPreliminarilyValidated(proposal) => { let view_number = proposal.data.view_number(); // All nodes get the latest proposed view as a proxy of `cur_view` of old. 
@@ -538,7 +538,7 @@ impl, V: Versions> epoch_transition_indicator, ) .await?; - } + }, HotShotEvent::QuorumProposalSend(proposal, _) => { let view = proposal.data.view_number(); @@ -546,7 +546,7 @@ impl, V: Versions> self.update_latest_proposed_view(view).await, "Failed to update latest proposed view" ); - } + }, HotShotEvent::VidDisperseSend(vid_disperse, _) => { let view_number = vid_disperse.data.view_number(); self.create_dependency_task_if_new( @@ -558,18 +558,18 @@ impl, V: Versions> EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(view, epoch) => { if epoch > &self.cur_epoch { self.cur_epoch = *epoch; } let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::Timeout(view, ..) => { let keep_view = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(keep_view); - } + }, HotShotEvent::HighQcSend(qc, ..) | HotShotEvent::ExtendedQcSend(qc, ..) => { ensure!(qc.view_number() > self.highest_qc.view_number()); let cert_epoch_number = qc.data.epoch; @@ -590,7 +590,7 @@ impl, V: Versions> .context(|e| warn!("Quorum certificate {:?} was invalid: {}", qc.data(), e))?; self.highest_qc = qc.clone(); - } + }, HotShotEvent::NextEpochQc2Formed(Either::Left(next_epoch_qc)) => { // Only update if the qc is from a newer view let current_next_epoch_qc = @@ -624,8 +624,8 @@ impl, V: Versions> &event_sender, ) .await; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs index f965bbfada..a5dc3a8428 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/handlers.rs @@ -221,7 +221,7 @@ pub(crate) async fn handle_quorum_proposal_recv< } else { bail!("Parent state not found! Consensus internally inconsistent"); } - } + }, None => None, }; diff --git a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs index b56130f3b3..caa929b7eb 100644 --- a/hotshot-task-impls/src/quorum_proposal_recv/mod.rs +++ b/hotshot-task-impls/src/quorum_proposal_recv/mod.rs @@ -180,10 +180,10 @@ impl, V: Versions> ) .await { - Ok(()) => {} + Ok(()) => {}, Err(e) => error!(?e, "Failed to validate the proposal"), } - } + }, HotShotEvent::ViewChange(view, epoch) => { if *epoch > self.cur_epoch { self.cur_epoch = *epoch; @@ -198,8 +198,8 @@ impl, V: Versions> // to enter view V + 1. 
let oldest_view_to_keep = TYPES::View::new(view.saturating_sub(1)); self.cancel_tasks(oldest_view_to_keep); - } - _ => {} + }, + _ => {}, } } } diff --git a/hotshot-task-impls/src/quorum_vote/handlers.rs b/hotshot-task-impls/src/quorum_vote/handlers.rs index 19a926a0fd..f0a87cc699 100644 --- a/hotshot-task-impls/src/quorum_vote/handlers.rs +++ b/hotshot-task-impls/src/quorum_vote/handlers.rs @@ -111,7 +111,7 @@ async fn store_and_get_computed_drb_result< .await; task_state.drb_computation = None; Ok(result) - } + }, Err(e) => Err(warn!("Error in DRB calculation: {:?}.", e)), } } @@ -223,10 +223,10 @@ async fn start_drb_task, V: Versio .insert(*task_epoch, result); notify_membership_of_drb_result::(&epoch_membership, result).await; task_state.drb_computation = None; - } + }, Err(e) => { tracing::error!("error joining DRB computation task: {e:?}"); - } + }, } } else if *task_epoch == new_epoch_number { return; @@ -583,10 +583,10 @@ pub(crate) async fn update_shared_state< Some((leaf, view)) => { maybe_validated_view = Some(view); Some(leaf) - } + }, None => None, } - } + }, }; let parent = maybe_parent.context(info!( diff --git a/hotshot-task-impls/src/quorum_vote/mod.rs b/hotshot-task-impls/src/quorum_vote/mod.rs index e3d7af2771..19bd5ab71a 100644 --- a/hotshot-task-impls/src/quorum_vote/mod.rs +++ b/hotshot-task-impls/src/quorum_vote/mod.rs @@ -123,7 +123,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::error!("{e:#}"); return; - } + }, }; let proposal_payload_comm = proposal.data.block_header().payload_commitment(); let parent_commitment = parent_leaf.commit(); @@ -165,7 +165,7 @@ impl + 'static, V: Versions> Handl } leaf = Some(proposed_leaf); parent_view_number = Some(parent_leaf.view_number()); - } + }, HotShotEvent::DaCertificateValidated(cert) => { let cert_payload_comm = &cert.data().payload_commit; let next_epoch_cert_payload_comm = cert.data().next_epoch_payload_commit; @@ -187,7 +187,7 @@ impl + 'static, V: Versions> Handl } else { next_epoch_payload_commitment = next_epoch_cert_payload_comm; } - } + }, HotShotEvent::VidShareValidated(share) => { let vid_payload_commitment = &share.data.payload_commitment(); vid_share = Some(share.clone()); @@ -211,8 +211,8 @@ impl + 'static, V: Versions> Handl } else { payload_commitment = Some(*vid_payload_commitment); } - } - _ => {} + }, + _ => {}, } } @@ -269,7 +269,7 @@ impl + 'static, V: Versions> Handl Err(e) => { tracing::warn!("{:?}", e); return; - } + }, }; tracing::trace!( @@ -380,21 +380,21 @@ impl, V: Versions> QuorumVoteTaskS } else { return false; } - } + }, VoteDependency::Dac => { if let HotShotEvent::DaCertificateValidated(cert) = event { cert.view_number } else { return false; } - } + }, VoteDependency::Vid => { if let HotShotEvent::VidShareValidated(disperse) = event { disperse.data.view_number() } else { return false; } - } + }, }; if event_view == view_number { tracing::trace!( @@ -552,7 +552,7 @@ impl, V: Versions> QuorumVoteTaskS Arc::clone(&event), ); } - } + }, HotShotEvent::DaCertificateRecv(cert) => { let view = cert.view_number; @@ -595,7 +595,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::VidShareRecv(sender, share) => { let view = share.data.view_number(); // Do nothing if the VID share is old @@ -659,7 +659,7 @@ impl, V: Versions> QuorumVoteTaskS &event_sender, Arc::clone(&event), ); - } + }, HotShotEvent::Timeout(view, ..) 
=> { let view = TYPES::View::new(view.saturating_sub(1)); // cancel old tasks @@ -668,7 +668,7 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } + }, HotShotEvent::ViewChange(mut view, _) => { view = TYPES::View::new(view.saturating_sub(1)); if !self.update_latest_voted_view(view).await { @@ -680,8 +680,8 @@ impl, V: Versions> QuorumVoteTaskS task.abort(); } self.vote_dependencies = current_tasks; - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/request.rs b/hotshot-task-impls/src/request.rs index 1bf327a98f..c0320105a8 100644 --- a/hotshot-task-impls/src/request.rs +++ b/hotshot-task-impls/src/request.rs @@ -147,14 +147,14 @@ impl> TaskState for NetworkRequest .await; } Ok(()) - } + }, HotShotEvent::ViewChange(view, _) => { let view = *view; if view > self.view { self.view = view; } Ok(()) - } + }, _ => Ok(()), } } @@ -226,7 +226,7 @@ impl> NetworkRequestState { tracing::warn!(e.message); return; - } + }, }; let mut da_committee_for_view = membership_reader.da_committee_members(view).await; if let Ok(leader) = membership_reader.leader(view).await { diff --git a/hotshot-task-impls/src/response.rs b/hotshot-task-impls/src/response.rs index 1ea66cc667..4f15a3a343 100644 --- a/hotshot-task-impls/src/response.rs +++ b/hotshot-task-impls/src/response.rs @@ -111,7 +111,7 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::QuorumProposalRequestRecv(req, signature) => { // Make sure that this request came from who we think it did if !req.key.validate(signature, req.commit().as_ref()) { @@ -137,16 +137,16 @@ impl NetworkResponseState { ) .await; } - } + }, HotShotEvent::Shutdown => { return; - } - _ => {} + }, + _ => {}, } - } + }, Err(e) => { tracing::error!("Failed to receive event. {:?}", e); - } + }, } } } diff --git a/hotshot-task-impls/src/rewind.rs b/hotshot-task-impls/src/rewind.rs index 82e267dfb3..d4a9bcb58e 100644 --- a/hotshot-task-impls/src/rewind.rs +++ b/hotshot-task-impls/src/rewind.rs @@ -58,7 +58,7 @@ impl TaskState for RewindTaskState { Err(e) => { tracing::error!("Failed to write file {}; error = {}", filename, e); return; - } + }, }; for (event_number, event) in self.events.iter().enumerate() { diff --git a/hotshot-task-impls/src/transactions.rs b/hotshot-task-impls/src/transactions.rs index 06a65f8b63..cbaef6aa38 100644 --- a/hotshot-task-impls/src/transactions.rs +++ b/hotshot-task-impls/src/transactions.rs @@ -134,7 +134,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::error!("Failed to calculate version: {:?}", e); return None; - } + }, }; if version < V::Marketplace::VERSION { @@ -159,7 +159,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; // Request a block from the builder unless we are between versions. 
@@ -303,11 +303,11 @@ impl, V: Versions> TransactionTask Ok(Err(e)) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, Err(e) => { tracing::debug!("Failed to retrieve bundle: {e}"); continue; - } + }, } } @@ -384,7 +384,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!("Upgrade certificate requires unsupported version, refusing to request blocks: {}", err); return None; - } + }, }; let packed_bundle = match self @@ -410,7 +410,7 @@ impl, V: Versions> TransactionTask .add(1); null_block - } + }, }; broadcast_event( @@ -458,7 +458,7 @@ impl, V: Versions> TransactionTask &self.output_event_stream, ) .await; - } + }, HotShotEvent::ViewChange(view, epoch) => { let view = TYPES::View::new(std::cmp::max(1, **view)); let epoch = if self.upgrade_lock.epochs_enabled(view).await { @@ -491,8 +491,8 @@ impl, V: Versions> TransactionTask self.handle_view_change(&event_stream, view, epoch).await; return Ok(()); } - } - _ => {} + }, + _ => {}, } Ok(()) } @@ -513,7 +513,7 @@ impl, V: Versions> TransactionTask // We still have time, will re-try in a bit sleep(RETRY_DELAY).await; continue; - } + }, } } } @@ -547,13 +547,13 @@ impl, V: Versions> TransactionTask let leaf = consensus_reader.saved_leaves().get(leaf_commitment).context (info!("Missing leaf with commitment {leaf_commitment} for view {target_view} in saved_leaves"))?; return Ok((target_view, leaf.payload_commitment())); - } + }, ViewInner::Failed => { // For failed views, backtrack target_view = TYPES::View::new(target_view.checked_sub(1).context(warn!("Reached genesis. Something is wrong -- have we not decided any blocks since genesis?"))?); continue; - } + }, } } } @@ -571,7 +571,7 @@ impl, V: Versions> TransactionTask Err(e) => { tracing::warn!("Failed to find last vid commitment in time: {e}"); return None; - } + }, }; let parent_comm_sig = match <::SignatureKey as SignatureKey>::sign( @@ -582,7 +582,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); return None; - } + }, }; while task_start_time.elapsed() < self.builder_timeout { @@ -596,7 +596,7 @@ impl, V: Versions> TransactionTask // We got a block Ok(Ok(block)) => { return Some(block); - } + }, // We failed to get a block Ok(Err(err)) => { @@ -604,13 +604,13 @@ impl, V: Versions> TransactionTask // pause a bit sleep(RETRY_DELAY).await; continue; - } + }, // We timed out while getting available blocks Err(err) => { tracing::info!(%err, "Timeout while getting available blocks"); return None; - } + }, } } @@ -675,7 +675,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err,"Error getting available blocks"); None - } + }, }) .flatten() .collect::>() @@ -735,7 +735,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::error!(%err, "Failed to sign block hash"); continue; - } + }, }; let response = { @@ -751,7 +751,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming block data"); continue; - } + }, }; let header_input = match header_input { @@ -759,7 +759,7 @@ impl, V: Versions> TransactionTask Err(err) => { tracing::warn!(%err, "Error claiming header input"); continue; - } + }, }; // verify the signature over the message diff --git a/hotshot-task-impls/src/upgrade.rs b/hotshot-task-impls/src/upgrade.rs index 4eaffb5dd8..d45917076e 100644 --- a/hotshot-task-impls/src/upgrade.rs +++ b/hotshot-task-impls/src/upgrade.rs @@ -217,7 +217,7 @@ impl UpgradeTaskState { tracing::debug!("Sending upgrade vote {:?}", vote.view_number()); 
broadcast_event(Arc::new(HotShotEvent::UpgradeVoteSend(vote)), &tx).await; - } + }, HotShotEvent::UpgradeVoteRecv(ref vote) => { tracing::debug!("Upgrade vote recv, Main Task {:?}", vote.view_number()); @@ -248,7 +248,7 @@ impl UpgradeTaskState { EpochTransitionIndicator::NotInTransition, ) .await?; - } + }, HotShotEvent::ViewChange(new_view, epoch_number) => { if *epoch_number > self.cur_epoch { self.cur_epoch = *epoch_number; @@ -328,8 +328,8 @@ impl UpgradeTaskState { ) .await; } - } - _ => {} + }, + _ => {}, } Ok(()) } diff --git a/hotshot-task-impls/src/vid.rs b/hotshot-task-impls/src/vid.rs index 8f98e018a7..0767288ac9 100644 --- a/hotshot-task-impls/src/vid.rs +++ b/hotshot-task-impls/src/vid.rs @@ -160,7 +160,7 @@ impl, V: Versions> VidTaskState { if *epoch > self.cur_epoch { @@ -178,7 +178,7 @@ impl, V: Versions> VidTaskState { let proposed_block_number = proposal.data.block_header().block_number(); @@ -243,11 +243,11 @@ impl, V: Versions> VidTaskState { return Some(HotShotTaskCompleted); - } - _ => {} + }, + _ => {}, } None } diff --git a/hotshot-task-impls/src/view_sync.rs b/hotshot-task-impls/src/view_sync.rs index 8dcc296cfe..475605db17 100644 --- a/hotshot-task-impls/src/view_sync.rs +++ b/hotshot-task-impls/src/view_sync.rs @@ -233,7 +233,7 @@ impl ViewSyncTaskState { Err(e) => { tracing::warn!(e.message); return; - } + }, }; // We do not have a replica task already running, so start one @@ -278,25 +278,25 @@ impl ViewSyncTaskState { let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncTimeout(view, _, _) => { tracing::debug!("view sync timeout in main task {:?}", view); let view = *view; self.send_to_or_create_replica(event, view, &event_stream) .await; - } + }, HotShotEvent::ViewSyncPreCommitVoteRecv(ref vote) => { let mut map = self.pre_commit_relay_map.write().await; @@ -344,7 +344,7 @@ impl ViewSyncTaskState { .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncCommitVoteRecv(ref vote) => { let mut map = self.commit_relay_map.write().await; @@ -392,7 +392,7 @@ impl ViewSyncTaskState { ) .await?; relay_map.insert(relay, vote_collector); - } + }, HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { let mut map = self.finalize_relay_map.write().await; @@ -441,7 +441,7 @@ impl ViewSyncTaskState { if let Ok(vote_task) = vote_collector { relay_map.insert(relay, vote_task); } - } + }, &HotShotEvent::ViewChange(new_view, epoch) => { if epoch > self.cur_epoch { @@ -483,7 +483,7 @@ impl ViewSyncTaskState { self.last_garbage_collected_view = self.cur_view - 1; } - } + }, &HotShotEvent::Timeout(view_number, ..) 
=> { // This is an old timeout and we can ignore it ensure!( @@ -528,9 +528,9 @@ impl ViewSyncTaskState { ) .await; } - } + }, - _ => {} + _ => {}, } Ok(()) } @@ -634,7 +634,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::Commit; @@ -741,7 +741,7 @@ impl ViewSyncReplicaTaskState { .await; } })); - } + }, HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { // Ignore certificate if it is for an older round @@ -796,7 +796,7 @@ impl ViewSyncReplicaTaskState { ) .await; return Some(HotShotTaskCompleted); - } + }, HotShotEvent::ViewSyncTrigger(view_number) => { let view_number = *view_number; @@ -850,7 +850,7 @@ impl ViewSyncReplicaTaskState { })); return None; - } + }, HotShotEvent::ViewSyncTimeout(round, relay, last_seen_certificate) => { let round = *round; @@ -884,11 +884,11 @@ impl ViewSyncReplicaTaskState { &event_stream, ) .await; - } + }, ViewSyncPhase::Finalize => { // This should never occur unimplemented!() - } + }, } self.timeout_task = Some(spawn({ @@ -917,7 +917,7 @@ impl ViewSyncReplicaTaskState { return None; } - } + }, _ => return None, } None diff --git a/hotshot-task-impls/src/vote_collection.rs b/hotshot-task-impls/src/vote_collection.rs index eb8a4cd0aa..9dc3ebc477 100644 --- a/hotshot-task-impls/src/vote_collection.rs +++ b/hotshot-task-impls/src/vote_collection.rs @@ -138,7 +138,7 @@ impl< self.accumulator = None; Ok(Some(cert)) - } + }, } } } @@ -279,7 +279,7 @@ where entry.insert(collector); Ok(()) - } + }, Entry::Occupied(mut entry) => { // handle the vote, and garbage collect if the vote collector is finished if entry @@ -293,7 +293,7 @@ where } Ok(()) - } + }, } } @@ -517,7 +517,7 @@ impl HotShotEvent::QuorumVoteRecv(vote) => { // #3967 REVIEW NOTE: Should we error if self.epoch is None? 
self.accumulate_vote(&vote.clone().into(), sender).await - } + }, _ => Ok(None), } } @@ -599,7 +599,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } @@ -641,7 +641,7 @@ impl match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await - } + }, _ => Ok(None), } } diff --git a/hotshot-task/src/dependency.rs b/hotshot-task/src/dependency.rs index 7b3d7dfa0b..d4a0e2eed8 100644 --- a/hotshot-task/src/dependency.rs +++ b/hotshot-task/src/dependency.rs @@ -163,13 +163,13 @@ impl Dependency for EventDependency { if (self.match_fn)(&event) { return Some(event); } - } + }, Err(RecvError::Overflowed(n)) => { tracing::error!("Dependency Task overloaded, skipping {} events", n); - } + }, Err(RecvError::Closed) => { return None; - } + }, } } } diff --git a/hotshot-task/src/task.rs b/hotshot-task/src/task.rs index 170d7dbc02..c0e4ec5b07 100644 --- a/hotshot-task/src/task.rs +++ b/hotshot-task/src/task.rs @@ -86,13 +86,13 @@ impl Task { S::handle_event(&mut self.state, input, &self.sender, &self.receiver) .await .inspect_err(|e| tracing::debug!("{e}")); - } + }, Err(RecvError::Closed) => { break self.boxed_state(); - } + }, Err(e) => { tracing::error!("Failed to receive from event stream Error: {}", e); - } + }, } } }) diff --git a/hotshot-testing/src/block_builder/mod.rs b/hotshot-testing/src/block_builder/mod.rs index 103b59c14d..534e6167ef 100644 --- a/hotshot-testing/src/block_builder/mod.rs +++ b/hotshot-testing/src/block_builder/mod.rs @@ -109,13 +109,13 @@ pub fn run_builder_source( match event { BuilderChange::Up if handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); @@ -153,13 +153,13 @@ pub fn run_builder_source_0_1( match event { BuilderChange::Up if handle.is_none() => { handle = Some(start_builder(url.clone(), source.clone())); - } + }, BuilderChange::Down => { if let Some(handle) = handle.take() { handle.abort(); } - } - _ => {} + }, + _ => {}, } } }); diff --git a/hotshot-testing/src/block_builder/random.rs b/hotshot-testing/src/block_builder/random.rs index 7a8723b22f..ec7d6129e2 100644 --- a/hotshot-testing/src/block_builder/random.rs +++ b/hotshot-testing/src/block_builder/random.rs @@ -178,7 +178,7 @@ where match stream.next().await { None => { break; - } + }, Some(evt) => { if let EventType::ViewFinished { view_number } = evt.event { if let Some(change) = self.changes.remove(&view_number) { @@ -192,18 +192,18 @@ where self.blocks.clone(), ))) } - } + }, BuilderChange::Down => { if let Some(handle) = task.take() { handle.abort(); } - } - BuilderChange::FailClaims(_) => {} + }, + BuilderChange::FailClaims(_) => {}, } let _ = self.change_sender.broadcast(change).await; } } - } + }, } } }); diff --git a/hotshot-testing/src/block_builder/simple.rs b/hotshot-testing/src/block_builder/simple.rs index bc098033a8..c29cf8666e 100644 --- a/hotshot-testing/src/block_builder/simple.rs +++ b/hotshot-testing/src/block_builder/simple.rs @@ -382,7 +382,7 @@ impl BuilderTask for SimpleBuilderTask { match stream.next().await { None => { break; - } + }, Some(evt) => match evt.event { EventType::ViewFinished { view_number } => { if let Some(change) = self.changes.remove(&view_number) { @@ -392,14 +392,14 @@ impl BuilderTask for SimpleBuilderTask { should_build_blocks = false; 
self.transactions.write().await.clear(); self.blocks.write().await.clear(); - } + }, BuilderChange::FailClaims(value) => { self.should_fail_claims.store(value, Ordering::Relaxed); - } + }, } let _ = self.change_sender.broadcast(change).await; } - } + }, EventType::Decide { leaf_chain, .. } if should_build_blocks => { let mut queue = self.transactions.write().await; for leaf_info in leaf_chain.iter() { @@ -413,7 +413,7 @@ impl BuilderTask for SimpleBuilderTask { } } self.blocks.write().await.clear(); - } + }, EventType::DaProposal { proposal, .. } if should_build_blocks => { let payload = TYPES::BlockPayload::from_bytes( &proposal.data.encoded_transactions, @@ -429,7 +429,7 @@ impl BuilderTask for SimpleBuilderTask { txn.claimed = Some(now); } } - } + }, EventType::Transactions { transactions } if should_build_blocks => { let mut queue = self.transactions.write().await; for transaction in transactions { @@ -443,8 +443,8 @@ impl BuilderTask for SimpleBuilderTask { ); } } - } - _ => {} + }, + _ => {}, }, } } diff --git a/hotshot-testing/src/byzantine/byzantine_behaviour.rs b/hotshot-testing/src/byzantine/byzantine_behaviour.rs index 7816b3e354..a634389e55 100644 --- a/hotshot-testing/src/byzantine/byzantine_behaviour.rs +++ b/hotshot-testing/src/byzantine/byzantine_behaviour.rs @@ -67,7 +67,7 @@ impl, V: Versions> EventTransforme consensus.write().await.reset_actions(); result - } + }, _ => vec![event.clone()], } } @@ -96,7 +96,7 @@ impl, V: Versions> EventTransforme match event { HotShotEvent::QuorumProposalSend(_, _) | HotShotEvent::QuorumVoteSend(_) => { vec![event.clone(), event.clone()] - } + }, _ => vec![event.clone()], } } @@ -182,11 +182,11 @@ impl + std::fmt::Debug, V: Version self.handle_proposal_send_event(event, proposal, sender) .await, ]; - } + }, HotShotEvent::QuorumProposalValidated(proposal, _) => { self.validated_proposals.push(proposal.data.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } @@ -412,7 +412,7 @@ impl + std::fmt::Debug, V: Version .unwrap(); return vec![HotShotEvent::QuorumVoteSend(vote)]; } - } + }, HotShotEvent::TimeoutVoteSend(vote) => { // Check if this view was a dishonest proposal view, if true dont send timeout let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await; @@ -421,11 +421,11 @@ impl + std::fmt::Debug, V: Version // So, dont send the timeout to the next leader from this byzantine replica return vec![]; } - } + }, HotShotEvent::QuorumVoteSend(vote) => { self.votes_sent.push(vote.clone()); - } - _ => {} + }, + _ => {}, } vec![event.clone()] } diff --git a/hotshot-testing/src/consistency_task.rs b/hotshot-testing/src/consistency_task.rs index 8813725db9..9caac9c975 100644 --- a/hotshot-testing/src/consistency_task.rs +++ b/hotshot-testing/src/consistency_task.rs @@ -44,15 +44,15 @@ fn sanitize_node_map( reduced.dedup(); match reduced.len() { - 0 => {} + 0 => {}, 1 => { result.insert(*view, reduced[0].clone()); - } + }, _ => { bail!( "We have received inconsistent leaves for view {view:?}. 
Leaves:\n\n{leaves:?}" ); - } + }, } } @@ -300,12 +300,12 @@ impl, V: Versions> ConsistencyTas match result { Ok(TestProgress::Finished) => { let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } + }, Err(e) => { self.add_error(e); let _ = self.test_sender.broadcast(TestEvent::Shutdown).await; - } - Ok(TestProgress::Incomplete) => {} + }, + Ok(TestProgress::Incomplete) => {}, } } diff --git a/hotshot-testing/src/predicates/event.rs b/hotshot-testing/src/predicates/event.rs index 6ee78fb2f8..4e1139f09a 100644 --- a/hotshot-testing/src/predicates/event.rs +++ b/hotshot-testing/src/predicates/event.rs @@ -217,7 +217,7 @@ where QuorumProposalSend(proposal, _) => { Some(proposal.data.block_header().payload_commitment()) == null_block::commitment::(num_storage_nodes) - } + }, _ => false, }); Box::new(EventPredicate { check, info }) diff --git a/hotshot-testing/src/script.rs b/hotshot-testing/src/script.rs index b25e286f9f..29a853c6ba 100644 --- a/hotshot-testing/src/script.rs +++ b/hotshot-testing/src/script.rs @@ -121,6 +121,6 @@ pub async fn validate_output_or_panic_in_script( "Stage {} | Output in {} failed to satisfy: {:?}.\n\nReceived:\n\n{:?}", stage_number, script_name, assert, output ) - } + }, } } diff --git a/hotshot-testing/src/spinning_task.rs b/hotshot-testing/src/spinning_task.rs index c13fe23d6b..ac92dc2287 100644 --- a/hotshot-testing/src/spinning_task.rs +++ b/hotshot-testing/src/spinning_task.rs @@ -199,10 +199,10 @@ where marketplace_config, ) .await - } + }, LateNodeContext::Restart => { panic!("Cannot spin up a node with Restart context") - } + }, }; let handle = context.run_tasks().await; @@ -219,13 +219,13 @@ where self.handles.write().await.push(node); } - } + }, NodeAction::Down => { if let Some(node) = self.handles.write().await.get_mut(idx) { tracing::error!("Node {} shutting down", idx); node.handle.shut_down().await; } - } + }, NodeAction::RestartDown(delay_views) => { let node_id = idx.try_into().unwrap(); if let Some(node) = self.handles.write().await.get_mut(idx) { @@ -327,25 +327,25 @@ where self.restart_contexts.insert(idx, new_ctx); } } - } + }, NodeAction::RestartUp => { if let Some(ctx) = self.restart_contexts.remove(&idx) { new_nodes.push((ctx.context, idx)); new_networks.push(ctx.network.clone()); } - } + }, NodeAction::NetworkUp => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks resuming", idx); handle.network.resume(); } - } + }, NodeAction::NetworkDown => { if let Some(handle) = self.handles.write().await.get(idx) { tracing::error!("Node {} networks pausing", idx); handle.network.pause(); } - } + }, } } } diff --git a/hotshot-testing/src/test_builder.rs b/hotshot-testing/src/test_builder.rs index 408c259360..76016fc86b 100644 --- a/hotshot-testing/src/test_builder.rs +++ b/hotshot-testing/src/test_builder.rs @@ -283,7 +283,7 @@ pub async fn create_test_handle< .await; left_handle - } + }, Behaviour::Byzantine(state) => { let state = Box::leak(state); state @@ -300,7 +300,7 @@ pub async fn create_test_handle< marketplace_config, ) .await - } + }, Behaviour::Standard => { let hotshot = SystemContext::::new( public_key, @@ -317,7 +317,7 @@ pub async fn create_test_handle< .await; hotshot.run_tasks().await - } + }, } } diff --git a/hotshot-testing/src/test_runner.rs b/hotshot-testing/src/test_runner.rs index a07cb509c8..8a1daead6b 100644 --- a/hotshot-testing/src/test_runner.rs +++ b/hotshot-testing/src/test_runner.rs @@ -281,12 +281,12 @@ where Ok(res) => match res { TestResult::Pass => { 
info!("Task shut down successfully"); - } + }, TestResult::Fail(e) => error_list.push(e), }, Err(e) => { tracing::error!("Error Joining the test task {:?}", e); - } + }, } } @@ -560,14 +560,14 @@ where if let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } + }, std::cmp::Ordering::Equal => { // If we have more builder tasks than DA nodes, pin them all on the last node. while let Some(task) = builder_tasks.pop() { task.start(Box::new(handle.event_stream())) } - } - std::cmp::Ordering::Greater => {} + }, + std::cmp::Ordering::Greater => {}, } self.nodes.push(Node { diff --git a/hotshot-testing/src/test_task.rs b/hotshot-testing/src/test_task.rs index 14520c6d13..890fd2ddf9 100644 --- a/hotshot-testing/src/test_task.rs +++ b/hotshot-testing/src/test_task.rs @@ -158,12 +158,12 @@ impl TestTask { let _ = S::handle_event(&mut self.state, (input, id)) .await .inspect_err(|e| tracing::error!("{e}")); - } + }, Ok((Err(e), _id, _)) => { error!("Error from one channel in test task {:?}", e); sleep(Duration::from_millis(4000)).await; - } - _ => {} + }, + _ => {}, }; } }) @@ -202,7 +202,7 @@ pub async fn add_network_message_test_task< Err(e) => { error!("Failed to receive message: {:?}", e); continue; - } + }, }; // Deserialize the message @@ -212,7 +212,7 @@ pub async fn add_network_message_test_task< Err(e) => { tracing::error!("Failed to deserialize message: {:?}", e); continue; - } + }, }; // Handle the message diff --git a/hotshot-testing/src/txn_task.rs b/hotshot-testing/src/txn_task.rs index 41b5ec3b14..f20d1524f3 100644 --- a/hotshot-testing/src/txn_task.rs +++ b/hotshot-testing/src/txn_task.rs @@ -52,7 +52,7 @@ impl, V: Versions> TxnTask match handles.get(idx) { None => { tracing::error!("couldn't get node in txn task"); - } + }, Some(node) => { // use rand::seq::IteratorRandom; // we're assuming all nodes have the same leaf. 
@@ -64,7 +64,7 @@ impl, V: Versions> TxnTask .submit_transaction(txn.clone()) .await .expect("Could not send transaction"); - } + }, } } } diff --git a/hotshot-testing/src/view_generator.rs b/hotshot-testing/src/view_generator.rs index f46d4b1101..3209b92c25 100644 --- a/hotshot-testing/src/view_generator.rs +++ b/hotshot-testing/src/view_generator.rs @@ -675,7 +675,7 @@ impl Stream for TestViewGenerator { Poll::Ready(test_view) => { self.current_view = Some(test_view.clone()); Poll::Ready(Some(test_view)) - } + }, Poll::Pending => Poll::Pending, } } diff --git a/hotshot-testing/src/view_sync_task.rs b/hotshot-testing/src/view_sync_task.rs index 9ad967c74b..07da62de1b 100644 --- a/hotshot-testing/src/view_sync_task.rs +++ b/hotshot-testing/src/view_sync_task.rs @@ -57,7 +57,7 @@ impl> TestTaskState | HotShotEvent::ViewSyncFinalizeCertificateSend(_, _) | HotShotEvent::ViewSyncTrigger(_) => { self.hit_view_sync.insert(id); - } + }, _ => (), } @@ -75,7 +75,7 @@ impl> TestTaskState hit_view_sync: self.hit_view_sync.clone(), })) } - } + }, } } } diff --git a/hotshot-testing/tests/tests_6/test_epochs.rs b/hotshot-testing/tests/tests_6/test_epochs.rs index bed8d185ac..cd4031b534 100644 --- a/hotshot-testing/tests/tests_6/test_epochs.rs +++ b/hotshot-testing/tests/tests_6/test_epochs.rs @@ -8,8 +8,8 @@ use hotshot_example_types::{ node_types::{ CombinedImpl, EpochUpgradeTestVersions, EpochsTestVersions, Libp2pImpl, MemoryImpl, PushCdnImpl, RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, - TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, - TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, TestTypesEpochCatchupTypes + TestConsecutiveLeaderTypes, TestTwoStakeTablesTypes, TestTypes, TestTypesEpochCatchupTypes, + TestTypesRandomizedCommitteeMembers, TestTypesRandomizedLeader, }, testable_delay::{DelayConfig, DelayOptions, DelaySettings, SupportedTraitTypesForAsyncDelay}, }; @@ -505,23 +505,23 @@ cross_tests!( // }; // let mut metadata = TestDescription::default().set_num_nodes(20,20); // let mut catchup_nodes = vec![]; -// +// // for i in 0..20 { // catchup_nodes.push(ChangeNode { // idx: i, // updown: NodeAction::RestartDown(0), // }) // } -// +// // metadata.timing_data = timing_data; -// +// // metadata.spinning_properties = SpinningTaskDescription { // // Restart all the nodes in view 10 // node_changes: vec![(10, catchup_nodes)], // }; // metadata.view_sync_properties = // hotshot_testing::view_sync_task::ViewSyncTaskDescription::Threshold(0, 20); -// +// // metadata.completion_task_description = // CompletionTaskDescription::TimeBasedCompletionTaskBuilder( // TimeBasedCompletionTaskDescription { @@ -536,7 +536,7 @@ cross_tests!( // decide_timeout: Duration::from_secs(20), // ..Default::default() // }; -// +// // metadata // }, // ); diff --git a/hotshot-types/src/consensus.rs b/hotshot-types/src/consensus.rs index 66ef1db9bf..c85fce8b7c 100644 --- a/hotshot-types/src/consensus.rs +++ b/hotshot-types/src/consensus.rs @@ -589,7 +589,7 @@ impl Consensus { // because the leader of view n + 1 may propose to the DA (and we would vote) // before the leader of view n. 
return true; - } + }, _ => return true, }; if view > *old_view { diff --git a/hotshot-types/src/data.rs b/hotshot-types/src/data.rs index 1ea1831ce2..313d62b144 100644 --- a/hotshot-types/src/data.rs +++ b/hotshot-types/src/data.rs @@ -505,13 +505,13 @@ impl VidDisperseShare { .into_iter() .map(|share| Self::V0(share)) .collect() - } + }, VidDisperse::V1(vid_disperse) => { VidDisperseShare2::::from_vid_disperse(vid_disperse) .into_iter() .map(|share| Self::V1(share)) .collect() - } + }, } } @@ -672,10 +672,10 @@ impl ViewChangeEvidence { match self { ViewChangeEvidence::Timeout(timeout_cert) => { ViewChangeEvidence2::Timeout(timeout_cert.to_tc2()) - } + }, ViewChangeEvidence::ViewSync(view_sync_cert) => { ViewChangeEvidence2::ViewSync(view_sync_cert.to_vsc2()) - } + }, } } } @@ -705,10 +705,10 @@ impl ViewChangeEvidence2 { match self { ViewChangeEvidence2::Timeout(timeout_cert) => { ViewChangeEvidence::Timeout(timeout_cert.to_tc()) - } + }, ViewChangeEvidence2::ViewSync(view_sync_cert) => { ViewChangeEvidence::ViewSync(view_sync_cert.to_vsc()) - } + }, } } } @@ -1242,7 +1242,7 @@ impl Leaf2 { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1252,13 +1252,13 @@ impl Leaf2 { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. (Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. @@ -1621,7 +1621,7 @@ impl Leaf { // Easiest cases are: // - no upgrade certificate on either: this is the most common case, and is always fine. // - if the parent didn't have a certificate, but we see one now, it just means that we have begun an upgrade: again, this is always fine. - (None | Some(_), None) => {} + (None | Some(_), None) => {}, // If we no longer see a cert, we have to make sure that we either: // - no longer care because we have passed new_version_first_view, or // - no longer care because we have passed `decide_by` without deciding the certificate. @@ -1631,13 +1631,13 @@ impl Leaf { || (self.view_number() > parent_cert.data.decide_by && decided_upgrade_certificate_read.is_none()), "The new leaf is missing an upgrade certificate that was present in its parent, and should still be live." ); - } + }, // If we both have a certificate, they should be identical. // Technically, this prevents us from initiating a new upgrade in the view immediately following an upgrade. // I think this is a fairly lax restriction. 
(Some(cert), Some(parent_cert)) => { ensure!(cert == parent_cert, "The new leaf does not extend the parent leaf, because it has attached a different upgrade certificate."); - } + }, } // This check should be added once we sort out the genesis leaf/justify_qc issue. diff --git a/hotshot-types/src/lib.rs b/hotshot-types/src/lib.rs index eac8950a5e..0d9bf701fc 100644 --- a/hotshot-types/src/lib.rs +++ b/hotshot-types/src/lib.rs @@ -134,7 +134,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to serialize public key"); vec![] - } + }, } } @@ -148,7 +148,7 @@ impl PeerConfig { Err(e) => { error!(?e, "Failed to deserialize public key"); None - } + }, } } } diff --git a/hotshot-types/src/message.rs b/hotshot-types/src/message.rs index 12e07efd87..09fc7eb05b 100644 --- a/hotshot-types/src/message.rs +++ b/hotshot-types/src/message.rs @@ -325,66 +325,66 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, GeneralConsensusMessage::ProposalRequested(req, _) => req.view_number, GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::ProposalResponse2(proposal) => { proposal.data.view_number() - } + }, GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.view_number() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.view_number(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.view_number() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.view_number(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), @@ -392,12 +392,12 @@ impl 
SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.view_number() - } + }, DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate2(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg2(disperse) => disperse.data.view_number(), } - } + }, } } @@ -410,12 +410,12 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, GeneralConsensusMessage::Proposal2(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, GeneralConsensusMessage::ProposalRequested(_, _) => None, GeneralConsensusMessage::ProposalResponse(proposal) => proposal.data.epoch(), GeneralConsensusMessage::ProposalResponse2(proposal) => proposal.data.epoch(), @@ -427,35 +427,35 @@ impl SequencingMessage { GeneralConsensusMessage::ViewSyncFinalizeVote(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate(message) => { message.epoch() - } + }, GeneralConsensusMessage::TimeoutVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncCommitVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeVote2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncPreCommitCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::ViewSyncCommitCertificate2(message) => message.epoch(), GeneralConsensusMessage::ViewSyncFinalizeCertificate2(message) => { message.epoch() - } + }, GeneralConsensusMessage::UpgradeProposal(message) => message.data.epoch(), GeneralConsensusMessage::UpgradeVote(message) => message.epoch(), GeneralConsensusMessage::HighQc(qc) | GeneralConsensusMessage::ExtendedQc(qc, _) => qc.epoch(), } - } + }, SequencingMessage::Da(da_message) => { match da_message { DaConsensusMessage::DaProposal(p) => { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, DaConsensusMessage::DaVote(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate(cert) => cert.epoch(), DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.epoch(), @@ -464,11 +464,11 @@ impl SequencingMessage { // view of leader in the leaf when proposal // this should match replica upon receipt p.data.epoch() - } + }, DaConsensusMessage::DaVote2(vote_message) => vote_message.epoch(), DaConsensusMessage::DaCertificate2(cert) => cert.epoch(), } - } + }, } } } @@ -649,7 +649,7 @@ impl UpgradeLock { } else { V::Base::VERSION } - } + }, None => V::Base::VERSION, }; @@ -669,7 +669,7 @@ impl UpgradeLock { } else { cert.data.old_version } - } + }, None => V::Base::VERSION, } } @@ -698,7 +698,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::serialize(&message), v => { bail!("Attempted to serialize with version {}, which is incompatible. 
This should be impossible.", v); - } + }, }; serialized_message @@ -725,7 +725,7 @@ impl UpgradeLock { v if v == V::Upgrade::VERSION => Serializer::::deserialize(message), v => { bail!("Cannot deserialize message with stated version {}", v); - } + }, } .wrap() .context(info!("Failed to deserialize message!"))?; diff --git a/hotshot-types/src/network.rs b/hotshot-types/src/network.rs index f2d35b6984..3a5dd4fe31 100644 --- a/hotshot-types/src/network.rs +++ b/hotshot-types/src/network.rs @@ -209,7 +209,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::ReadFromFileError(e)); - } + }, }; // deserialize @@ -256,7 +256,7 @@ impl NetworkConfig { Ok(data) => data, Err(e) => { return Err(NetworkConfigError::SerializeError(e)); - } + }, }; // write to file diff --git a/hotshot-types/src/traits/storage.rs b/hotshot-types/src/traits/storage.rs index 991ba8282f..e130a3d67f 100644 --- a/hotshot-types/src/traits/storage.rs +++ b/hotshot-types/src/traits/storage.rs @@ -54,7 +54,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, VidDisperseShare::V1(share) => { self.append_vid2(&Proposal { data: share.clone(), @@ -62,7 +62,7 @@ pub trait Storage: Send + Sync + Clone { _pd: std::marker::PhantomData, }) .await - } + }, } } /// Add a proposal to the stored DA proposals. diff --git a/hotshot-types/src/vote.rs b/hotshot-types/src/vote.rs index e33e66f57f..1631896652 100644 --- a/hotshot-types/src/vote.rs +++ b/hotshot-types/src/vote.rs @@ -170,7 +170,7 @@ impl< Err(e) => { tracing::warn!("Failed to generate versioned vote data: {e}"); return None; - } + }, }; if !key.validate(&vote.signature(), vote_commitment.as_ref()) { diff --git a/hotshot-utils/src/anytrace.rs b/hotshot-utils/src/anytrace.rs index 12e129ca01..b9aec49221 100644 --- a/hotshot-utils/src/anytrace.rs +++ b/hotshot-utils/src/anytrace.rs @@ -24,21 +24,21 @@ impl Log for Error { match error_level { Level::Trace => { tracing::trace!("{}", self.message); - } + }, Level::Debug => { tracing::debug!("{}", self.message); - } + }, Level::Info => { tracing::info!("{}", self.message); - } + }, Level::Warn => { tracing::warn!("{}", self.message); - } + }, Level::Error => { tracing::error!("{}", self.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } } @@ -48,7 +48,7 @@ impl Log for Result { let error = match self { Ok(_) => { return; - } + }, Err(e) => e, }; @@ -60,21 +60,21 @@ impl Log for Result { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } } diff --git a/hotshot-utils/src/anytrace/macros.rs b/hotshot-utils/src/anytrace/macros.rs index 29c5178b07..b9f6b7db56 100644 --- a/hotshot-utils/src/anytrace/macros.rs +++ b/hotshot-utils/src/anytrace/macros.rs @@ -167,21 +167,21 @@ macro_rules! 
log { match error_level { Level::Trace => { tracing::trace!("{}", error.message); - } + }, Level::Debug => { tracing::debug!("{}", error.message); - } + }, Level::Info => { tracing::info!("{}", error.message); - } + }, Level::Warn => { tracing::warn!("{}", error.message); - } + }, Level::Error => { tracing::error!("{}", error.message); - } + }, // impossible - Level::Unspecified => {} + Level::Unspecified => {}, } } }; diff --git a/hotshot/src/lib.rs b/hotshot/src/lib.rs index e1ffbae935..f8ad1573a3 100644 --- a/hotshot/src/lib.rs +++ b/hotshot/src/lib.rs @@ -215,10 +215,10 @@ impl, V: Versions> SystemContext Arc { #[allow(clippy::panic)] match storage.migrate_consensus().await { - Ok(()) => {} + Ok(()) => {}, Err(e) => { panic!("Failed to migrate consensus storage: {e}"); - } + }, } let internal_chan = broadcast(EVENT_CHANNEL_SIZE); @@ -767,10 +767,10 @@ where match event { Either::Left(msg) => { let _ = left_sender.broadcast(msg.into()).await; - } + }, Either::Right(msg) => { let _ = right_sender.broadcast(msg.into()).await; - } + }, } } } diff --git a/hotshot/src/tasks/mod.rs b/hotshot/src/tasks/mod.rs index 544154a2cd..3f1f79c858 100644 --- a/hotshot/src/tasks/mod.rs +++ b/hotshot/src/tasks/mod.rs @@ -280,13 +280,13 @@ pub fn create_shutdown_event_monitor { return; - } + }, Err(e) => { tracing::error!("Shutdown event monitor channel recv error: {}", e); - } + }, } } } diff --git a/hotshot/src/traits/networking/combined_network.rs b/hotshot/src/traits/networking/combined_network.rs index 87b4a873c2..dc6af286cc 100644 --- a/hotshot/src/traits/networking/combined_network.rs +++ b/hotshot/src/traits/networking/combined_network.rs @@ -185,12 +185,12 @@ impl CombinedNetworks { // The primary fail counter reached 0, the primary is now considered up primary_down.store(false, Ordering::Relaxed); debug!("primary_fail_counter reached zero, primary_down set to false"); - } + }, c => { // Decrement the primary fail counter primary_fail_counter.store(c - 1, Ordering::Relaxed); debug!("primary_fail_counter set to {:?}", c - 1); - } + }, } return Ok(()); } @@ -211,7 +211,7 @@ impl CombinedNetworks { c if c < COMBINED_NETWORK_PRIMARY_CHECK_INTERVAL => { // Just increment the 'no delay counter' self.no_delay_counter.store(c + 1, Ordering::Relaxed); - } + }, _ => { // The 'no delay counter' reached the threshold debug!( @@ -226,7 +226,7 @@ impl CombinedNetworks { // The primary fail counter is set just below the threshold to delay the next message self.primary_fail_counter .store(COMBINED_NETWORK_MIN_PRIMARY_FAILURES, Ordering::Relaxed); - } + }, } } // Send the message diff --git a/hotshot/src/traits/networking/libp2p_network.rs b/hotshot/src/traits/networking/libp2p_network.rs index 93e9c5ef21..ebabb516e3 100644 --- a/hotshot/src/traits/networking/libp2p_network.rs +++ b/hotshot/src/traits/networking/libp2p_network.rs @@ -289,7 +289,7 @@ impl TestableNetworkingImplementation for Libp2pNetwork { Ok(network) => network, Err(err) => { panic!("Failed to create libp2p network: {err:?}"); - } + }, }, ) }) @@ -372,7 +372,7 @@ pub fn derive_libp2p_multiaddr(addr: &String) -> anyhow::Result { } format!("/dns/{host}/udp/{port}/quic-v1") - } + }, }; // Convert the multiaddr string to a `Multiaddr` @@ -680,7 +680,7 @@ impl Libp2pNetwork { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!("failed to send gossip message: {err}")) })?; - } + }, DirectRequest(msg, _pid, chan) => { sender.try_send(msg).map_err(|err| { NetworkError::ChannelSendError(format!( @@ -702,12 +702,12 @@ impl 
Libp2pNetwork { { error!("failed to ack!"); }; - } - DirectResponse(_msg, _) => {} + }, + DirectResponse(_msg, _) => {}, NetworkEvent::IsBootstrapped => { error!("handle_recvd_events received `NetworkEvent::IsBootstrapped`, which should be impossible."); - } - NetworkEvent::ConnectedPeersUpdate(_) => {} + }, + NetworkEvent::ConnectedPeersUpdate(_) => {}, } Ok::<(), NetworkError>(()) } @@ -909,7 +909,7 @@ impl ConnectedNetwork for Libp2pNetwork { return Err(NetworkError::LookupError(format!( "failed to look up node for direct message: {err}" ))); - } + }, }; #[cfg(feature = "hotshot-testing")] @@ -941,7 +941,7 @@ impl ConnectedNetwork for Libp2pNetwork { Err(e) => { self.inner.metrics.num_failed_messages.add(1); Err(e) - } + }, } } @@ -1002,7 +1002,7 @@ impl ConnectedNetwork for Libp2pNetwork { Ok(m) => m, Err(e) => { return tracing::warn!(e.message); - } + }, }; let future_leader = match membership.leader(future_view).await { Ok(l) => l, @@ -1011,7 +1011,7 @@ impl ConnectedNetwork for Libp2pNetwork { "Failed to calculate leader for view {:?}: {e}", future_view ); - } + }, }; let _ = self diff --git a/hotshot/src/traits/networking/memory_network.rs b/hotshot/src/traits/networking/memory_network.rs index 9aa8adfef2..4a0663d246 100644 --- a/hotshot/src/traits/networking/memory_network.rs +++ b/hotshot/src/traits/networking/memory_network.rs @@ -282,10 +282,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -336,10 +336,10 @@ impl ConnectedNetwork for MemoryNetwork { match res { Ok(()) => { trace!(?key, "Delivered message to remote"); - } + }, Err(e) => { warn!(?e, ?key, "Error sending broadcast message to node"); - } + }, } } } @@ -375,7 +375,7 @@ impl ConnectedNetwork for MemoryNetwork { Ok(()) => { trace!(?recipient, "Delivered message to remote"); Ok(()) - } + }, Err(e) => Err(NetworkError::MessageSendError(format!( "error sending direct message to node: {e}", ))), diff --git a/hotshot/src/traits/networking/push_cdn_network.rs b/hotshot/src/traits/networking/push_cdn_network.rs index a742e5f857..0f222f2729 100644 --- a/hotshot/src/traits/networking/push_cdn_network.rs +++ b/hotshot/src/traits/networking/push_cdn_network.rs @@ -591,7 +591,7 @@ impl ConnectedNetwork for PushCdnNetwork { return Err(NetworkError::MessageReceiveError(format!( "failed to receive message: {error}" ))); - } + }, }; // Extract the underlying message diff --git a/hotshot/src/types/handle.rs b/hotshot/src/types/handle.rs index 07e6bab901..4f100f8c0c 100644 --- a/hotshot/src/types/handle.rs +++ b/hotshot/src/types/handle.rs @@ -116,17 +116,17 @@ impl + 'static, V: Versions> self.network .broadcast_message(serialized_message, Topic::Global, BroadcastDelay::None) .await?; - } + }, RecipientList::Direct(recipient) => { self.network .direct_message(serialized_message, recipient) .await?; - } + }, RecipientList::Many(recipients) => { self.network .da_broadcast_message(serialized_message, recipients, BroadcastDelay::None) .await?; - } + }, } Ok(()) } @@ -199,7 +199,7 @@ impl + 'static, V: Versions> Err(e) => { tracing::warn!(e.message); continue; - } + }, }; // Make sure that the quorum_proposal is valid if let Err(err) = quorum_proposal.validate_signature(&membership).await { diff --git a/marketplace-builder-core/src/service.rs b/marketplace-builder-core/src/service.rs index 29fff93c46..fca236379b 100644 --- a/marketplace-builder-core/src/service.rs +++ 
b/marketplace-builder-core/src/service.rs @@ -189,7 +189,7 @@ where match event.event { EventType::Error { error } => { tracing::error!("Error event in HotShot: {:?}", error); - } + }, EventType::Transactions { transactions } => { let hooks = Arc::clone(&hooks); let coordinator = Arc::clone(&coordinator); @@ -208,20 +208,20 @@ where .collect::>() .await; }); - } + }, EventType::Decide { leaf_chain, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_decide(leaf_chain).await }); - } + }, EventType::DaProposal { proposal, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_da_proposal(proposal.data).await }); - } + }, EventType::QuorumProposal { proposal, .. } => { let coordinator = Arc::clone(&coordinator); spawn(async move { coordinator.handle_quorum_proposal(proposal.data).await }); - } - _ => {} + }, + _ => {}, } } } @@ -356,14 +356,14 @@ where // If we couldn't find the state because it hasn't yet been created, try again sleep(self.api_timeout / 10).await; continue; - } + }, BuilderStateLookup::Decided => { // If we couldn't find the state because the view has already been decided, we can just return an error tracing::warn!("Requested a bundle for view we already GCd as decided",); return Err(BuildError::Error( "Request for a bundle for a view that has already been decided.".to_owned(), )); - } + }, }; tracing::info!( diff --git a/marketplace-builder-core/src/testing/order_test.rs b/marketplace-builder-core/src/testing/order_test.rs index 9de0297172..036cb8cad8 100644 --- a/marketplace-builder-core/src/testing/order_test.rs +++ b/marketplace-builder-core/src/testing/order_test.rs @@ -64,12 +64,12 @@ impl RoundTransactionBehavior { ]), ); transactions - } + }, RoundTransactionBehavior::AdjustRemoveTail => { let mut transactions = transactions.clone(); transactions.pop(); transactions - } + }, RoundTransactionBehavior::ProposeInAdvance(propose_in_advance_round) => { let mut transactions = transactions.clone(); transactions.push(TestTransaction::new(vec![ @@ -77,12 +77,12 @@ impl RoundTransactionBehavior { 0_u8, ])); transactions - } + }, RoundTransactionBehavior::AdjustRemove => { let mut transactions = transactions.clone(); transactions.remove(rand::random::() % (transactions.len() - 1)); transactions - } + }, } } } diff --git a/marketplace-builder-shared/src/coordinator/mod.rs b/marketplace-builder-shared/src/coordinator/mod.rs index e546aa3f23..b3b9b81da4 100644 --- a/marketplace-builder-shared/src/coordinator/mod.rs +++ b/marketplace-builder-shared/src/coordinator/mod.rs @@ -195,7 +195,7 @@ where }, ); return Err(Error::TxnSender(err)); - } + }, }; self.update_txn_status(&commit, TransactionStatus::Pending); @@ -251,15 +251,15 @@ where (Either::Right(da_proposal), Either::Left(quorum_proposal)) | (Either::Left(quorum_proposal), Either::Right(da_proposal)) => { self.spawn_builder_state(quorum_proposal, da_proposal).await - } + }, _ => { unreachable!() - } + }, } - } + }, Entry::Vacant(entry) => { entry.insert(proposal); - } + }, } } @@ -499,10 +499,10 @@ where "Not changing status of rejected/sequenced transaction", ); return; - } + }, _ => { tracing::debug!(?old_status, ?new_status, "Changing status of transaction",); - } + }, } } self.tx_status.insert(*txn_hash, new_status); diff --git a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs index 6d52f3d955..46d648bebd 100644 --- 
a/marketplace-builder-shared/src/coordinator/tiered_view_map.rs +++ b/marketplace-builder-shared/src/coordinator/tiered_view_map.rs @@ -143,10 +143,10 @@ where match self.0.entry(*key.view()) { Entry::Vacant(entry) => { entry.insert(nem![key.into_subkey() => value]); - } + }, Entry::Occupied(mut entry) => { entry.get_mut().insert(key.into_subkey(), value); - } + }, } } diff --git a/marketplace-builder-shared/src/error.rs b/marketplace-builder-shared/src/error.rs index ac2d3b155e..9200c49c06 100644 --- a/marketplace-builder-shared/src/error.rs +++ b/marketplace-builder-shared/src/error.rs @@ -33,18 +33,18 @@ impl From> for BuildError { match value { Error::SignatureValidation => { BuildError::Error("Signature validation failed".to_owned()) - } + }, Error::Signing(_) => BuildError::Error("Failed to sign response".to_owned()), Error::ApiTimeout => BuildError::Error("Timeout".to_owned()), Error::NotFound => BuildError::NotFound, Error::AlreadyDecided => { BuildError::Error("Request for an already decided view".to_owned()) - } + }, Error::BuildBlock(_) => BuildError::Error("Failed to build block".to_owned()), Error::TxnSender(_) => BuildError::Error("Transaction channel error".to_owned()), Error::TxTooBig { len, max_tx_len } => { BuildError::Error(format!("Transaction too big ({len}/{max_tx_len}")) - } + }, } } } diff --git a/marketplace-builder-shared/src/state.rs b/marketplace-builder-shared/src/state.rs index 9666d17337..9cee9aa3f1 100644 --- a/marketplace-builder-shared/src/state.rs +++ b/marketplace-builder-shared/src/state.rs @@ -208,7 +208,7 @@ where self.txn_queue.write().await.insert(txn); queue_empty = false; - } + }, Err(async_broadcast::TryRecvError::Empty) | Err(async_broadcast::TryRecvError::Closed) => { @@ -216,12 +216,12 @@ where // If it's closed that's a big problem and we should // probably indicate it as such. 
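// The drain loop above stops on Empty or Closed but survives Overflowed by
// logging how many messages were lost. A standalone sketch of that policy,
// assuming the async_broadcast crate; `Txn` and the capacity are stand-ins.

use async_broadcast::TryRecvError;

type Txn = u64; // hypothetical transaction type

/// Drain everything currently buffered in `rx`, tolerating overflow.
fn drain(rx: &mut async_broadcast::Receiver<Txn>) -> Vec<Txn> {
    let mut out = Vec::new();
    loop {
        match rx.try_recv() {
            Ok(txn) => out.push(txn),
            // Nothing buffered right now, or the channel is gone: stop draining.
            Err(TryRecvError::Empty) | Err(TryRecvError::Closed) => break,
            // Messages were dropped because the ring buffer wrapped; note it, keep going.
            Err(TryRecvError::Overflowed(lost)) => {
                eprintln!("missed {lost} transactions due to backlog");
                continue;
            },
        }
    }
    out
}

fn main() {
    let (mut tx, mut rx) = async_broadcast::broadcast(2);
    tx.set_overflow(true); // old messages are overwritten instead of blocking the sender
    for i in 0..5 {
        let _ = tx.try_broadcast(i);
    }
    // Only the last 2 survive; the drain reports the 3 lost ones.
    println!("{:?}", drain(&mut rx));
}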
break; - } + }, Err(async_broadcast::TryRecvError::Overflowed(lost)) => { tracing::warn!("Missed {lost} transactions due to backlog"); continue; - } + }, } } queue_empty diff --git a/marketplace-builder-shared/src/testing/consensus.rs b/marketplace-builder-shared/src/testing/consensus.rs index f4ee61cd00..f19e9e7734 100644 --- a/marketplace-builder-shared/src/testing/consensus.rs +++ b/marketplace-builder-shared/src/testing/consensus.rs @@ -108,7 +108,7 @@ impl SimulatedChainState { &TestInstanceState::default(), ) .await - } + }, Some(prev_proposal) => { let prev_justify_qc = &prev_proposal.justify_qc(); let quorum_data = QuorumData2:: { @@ -124,7 +124,7 @@ impl SimulatedChainState { prev_justify_qc.signatures.clone(), PhantomData, ) - } + }, }; tracing::debug!("Iteration: {} justify_qc: {:?}", self.round, justify_qc); diff --git a/marketplace-builder-shared/src/testing/generation.rs b/marketplace-builder-shared/src/testing/generation.rs index d7667a24e8..e16293a384 100644 --- a/marketplace-builder-shared/src/testing/generation.rs +++ b/marketplace-builder-shared/src/testing/generation.rs @@ -137,7 +137,7 @@ where .map(Result::unwrap), ); } - } + }, GenerationStrategy::Random { min_per_view, max_per_view, @@ -164,7 +164,7 @@ where self.txn_nonce += 1; } - } + }, GenerationStrategy::Flood { min_tx_size, max_tx_size, @@ -188,7 +188,7 @@ where self.txn_nonce += 1; } - } + }, }; } } @@ -235,7 +235,7 @@ where .publish_transaction_async(txn) .await .expect("Failed to submit transaction to public mempool"); - } + }, SubmissionEndpoint::Private => { if let Err(e) = private_mempool_client .post::<()>("submit") @@ -248,17 +248,17 @@ where // If we can't reach the builder altogether, test should fail builder::Error::Request(request_error) => { panic!("Builder API not available: {request_error}") - } + }, // If the builder returns an error, we will re-submit this transaction // on the next view, so we return it to the queue and break error => { tracing::warn!(?error, "Builder API error"); self.txn_queue.push_front(txn); break; - } + }, }; } - } + }, } } } diff --git a/marketplace-builder-shared/src/utils/event_service_wrapper.rs b/marketplace-builder-shared/src/utils/event_service_wrapper.rs index ece95e072d..9397b7cb72 100644 --- a/marketplace-builder-shared/src/utils/event_service_wrapper.rs +++ b/marketplace-builder-shared/src/utils/event_service_wrapper.rs @@ -58,7 +58,7 @@ impl EventServiceStream break, Err(err) => { tracing::debug!(?err, "Healthcheck failed, retrying"); - } + }, } sleep(Self::RETRY_PERIOD).await; } @@ -90,18 +90,18 @@ impl EventServiceStream { return Some((event, this)); - } + }, Ok(Some(Err(err))) => { warn!(?err, "Error in event stream"); continue; - } + }, Ok(None) => { warn!("Event stream ended, attempting reconnection"); let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, Err(_) => { // Timeout occurred, reconnect warn!("Timeout waiting for next event; reconnecting"); @@ -109,21 +109,21 @@ impl EventServiceStream match reconnection.await { Ok(connection) => { let _ = std::mem::replace(&mut this.connection, Left(connection)); continue; - } + }, Err(err) => { error!(?err, "Error while reconnecting, will retry in a while"); sleep(Self::RETRY_PERIOD).await; let fut = Self::connect_inner(this.api_url.clone()); let _ = std::mem::replace(&mut this.connection, Right(Box::pin(fut))); continue; - } + }, }, } } diff --git a/marketplace-builder/src/builder.rs b/marketplace-builder/src/builder.rs 
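// The EventServiceStream hunks above reconnect when the stream ends or a
// receive times out. A condensed sketch of that loop, assuming tokio;
// `connect` and `next_event` are hypothetical stand-ins for the real client.

use std::time::Duration;

struct Conn { remaining: u32 }

// Hypothetical connection setup; the real code dials the event service URL.
async fn connect() -> Conn {
    Conn { remaining: 3 }
}

// Hypothetical receive; returns None when the stream is exhausted.
async fn next_event(conn: &mut Conn) -> Option<u32> {
    if conn.remaining == 0 {
        None
    } else {
        conn.remaining -= 1;
        Some(conn.remaining)
    }
}

#[tokio::main]
async fn main() {
    const TIMEOUT: Duration = Duration::from_secs(1);
    let mut conn = connect().await;
    let mut seen = 0;
    while seen < 5 {
        match tokio::time::timeout(TIMEOUT, next_event(&mut conn)).await {
            // Got an event within the deadline: hand it to the consumer.
            Ok(Some(event)) => {
                println!("event: {event}");
                seen += 1;
            },
            // Stream ended: tear down and dial again.
            Ok(None) => conn = connect().await,
            // No event before the deadline: assume the connection is dead and redial.
            Err(_) => conn = connect().await,
        }
    }
}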
index cd334f9c0e..c564d23a00 100644 --- a/marketplace-builder/src/builder.rs +++ b/marketplace-builder/src/builder.rs @@ -608,7 +608,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -616,7 +616,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![registered_transaction.clone()]); @@ -728,7 +728,7 @@ mod test { get_bundle(builder_client, parent_view_number, parent_commitment).await, parent_view_number, ) - } + }, Mempool::Private => { submit_and_get_bundle_with_private_mempool( builder_client, @@ -736,7 +736,7 @@ mod test { urls, ) .await - } + }, }; assert_eq!(bundle.transactions, vec![unregistered_transaction.clone()]); diff --git a/marketplace-builder/src/hooks.rs b/marketplace-builder/src/hooks.rs index ede7452152..43f4096802 100644 --- a/marketplace-builder/src/hooks.rs +++ b/marketplace-builder/src/hooks.rs @@ -67,11 +67,11 @@ pub async fn fetch_namespaces_to_skip(solver_base_url: Url) -> Option { error!("Failed to get the registered rollups: {:?}.", e); None - } + }, } } @@ -130,7 +130,7 @@ impl BuilderHooks for EspressoReserveHooks { Err(e) => { error!("Failed to sign the bid txn: {:?}.", e); return; - } + }, }; let solver_client = connect_to_solver(solver_base_url); @@ -172,12 +172,12 @@ impl BuilderHooks for EspressoFallbackHooks { Some(namespaces_to_skip) => { transactions.retain(|txn| !namespaces_to_skip.contains(&txn.namespace())); transactions - } + }, // Solver connection has failed and we don't have up-to-date information on this None => { error!("Not accepting transactions due to outdated information"); Vec::new() - } + }, } } diff --git a/marketplace-solver/src/api.rs b/marketplace-solver/src/api.rs index 5f0d9c8c30..11ed573a9a 100644 --- a/marketplace-solver/src/api.rs +++ b/marketplace-solver/src/api.rs @@ -164,7 +164,7 @@ fn merge_toml(into: &mut Value, from: Value) { Entry::Occupied(mut entry) => merge_toml(entry.get_mut(), value), Entry::Vacant(entry) => { entry.insert(value); - } + }, } } } diff --git a/marketplace-solver/src/database.rs b/marketplace-solver/src/database.rs index c2d09f84af..127c385935 100644 --- a/marketplace-solver/src/database.rs +++ b/marketplace-solver/src/database.rs @@ -60,7 +60,7 @@ impl PostgresClient { } connect_opts.to_url_lossy() - } + }, }; if let Some(max_connections) = max_connections { diff --git a/marketplace-solver/src/events.rs b/marketplace-solver/src/events.rs index 913fbe3151..b4a1a01098 100644 --- a/marketplace-solver/src/events.rs +++ b/marketplace-solver/src/events.rs @@ -59,7 +59,7 @@ pub async fn handle_events( match event.event { hotshot::types::EventType::ViewFinished { view_number } => { tracing::debug!("received view finished event {view_number:?}") - } + }, _ => (), } } diff --git a/marketplace-solver/src/testing.rs b/marketplace-solver/src/testing.rs index b6745fa86f..b78e0c838a 100755 --- a/marketplace-solver/src/testing.rs +++ b/marketplace-solver/src/testing.rs @@ -268,7 +268,7 @@ mod test { // Ensure the error indicates an invalid signature match err { SolverError::InvalidSignature(signature) - if reg_ns_2.signature.to_string() == signature => {} + if reg_ns_2.signature.to_string() == signature => {}, _ => panic!("err {err:?}"), } } @@ -375,7 +375,7 @@ mod test { .unwrap_err(); match err { - SolverError::Database(_) => {} + SolverError::Database(_) => {}, _ => panic!("err {err:?}"), } } diff --git 
a/node-metrics/src/api/node_validator/v0/cdn/mod.rs b/node-metrics/src/api/node_validator/v0/cdn/mod.rs index 9359afec01..e4c7d2d974 100644 --- a/node-metrics/src/api/node_validator/v0/cdn/mod.rs +++ b/node-metrics/src/api/node_validator/v0/cdn/mod.rs @@ -95,7 +95,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error receiving message: {:?}", err); continue; - } + }, }; // We want to try and decode this message. @@ -106,17 +106,17 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; let external_message_deserialize_result = match message.kind { MessageKind::External(external_message) => { bincode::deserialize::(&external_message) - } + }, _ => { tracing::error!("unexpected message kind: {:?}", message); continue; - } + }, }; let external_message = match external_message_deserialize_result { @@ -124,7 +124,7 @@ impl CdnReceiveMessagesTask { Err(err) => { tracing::error!("error deserializing message: {:?}", err); continue; - } + }, }; match external_message { @@ -137,11 +137,11 @@ impl CdnReceiveMessagesTask { tracing::error!("error sending public api url: {:?}", err); return; } - } + }, _ => { // We're not concerned about other message types - } + }, } } } @@ -237,7 +237,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing rollcall request: {:?}", err); return; - } + }, }; let hotshot_message = Message:: { @@ -250,7 +250,7 @@ impl BroadcastRollCallTask { Err(err) => { tracing::error!("error serializing hotshot message: {:?}", err); return; - } + }, }; let broadcast_result = network @@ -564,7 +564,7 @@ mod test { public_key, BLSPubKey::generated_from_seed_indexed([0; 32], 0).0 ); - } + }, _ => panic!("unexpected external message"), } diff --git a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs index c60d089e62..b167b5498a 100644 --- a/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs +++ b/node-metrics/src/api/node_validator/v0/create_node_validator_api.rs @@ -119,7 +119,7 @@ impl HotShotEventProcessingTask { None => { tracing::info!("event stream closed"); break; - } + }, }; let Event { event, .. } = event; @@ -135,7 +135,7 @@ impl HotShotEventProcessingTask { panic!("HotShotEventProcessingTask leaf sender is closed, unrecoverable, the block state will stagnate."); } } - } + }, EventType::ExternalMessageReceived { data, .. 
} => { let roll_call_info = match bincode::deserialize(&data) { @@ -147,12 +147,12 @@ impl HotShotEventProcessingTask { err ); continue; - } + }, _ => { // Ignore any other potentially recognized messages continue; - } + }, }; let public_api_url = roll_call_info.public_api_url; @@ -163,11 +163,11 @@ impl HotShotEventProcessingTask { tracing::error!("url sender closed: {}", err); panic!("HotShotEventProcessingTask url sender is closed, unrecoverable, the node state will stagnate."); } - } + }, _ => { // Ignore all other events continue; - } + }, } } } @@ -236,7 +236,7 @@ impl ProcessExternalMessageHandlingTask { None => { tracing::error!("external message receiver closed"); break; - } + }, }; match external_message { @@ -248,12 +248,12 @@ impl ProcessExternalMessageHandlingTask { tracing::error!("url sender closed: {}", err); break; } - } + }, _ => { // Ignore all other messages continue; - } + }, } } } @@ -399,14 +399,14 @@ mod test { Ok(node_validator_api) => node_validator_api, Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -449,7 +449,7 @@ mod test { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; // We would like to wait until being signaled diff --git a/node-metrics/src/api/node_validator/v0/mod.rs b/node-metrics/src/api/node_validator/v0/mod.rs index b364c105ee..da900fa467 100644 --- a/node-metrics/src/api/node_validator/v0/mod.rs +++ b/node-metrics/src/api/node_validator/v0/mod.rs @@ -64,11 +64,11 @@ impl fmt::Display for Error { match self { Self::UnhandledSurfDisco(status, msg) => { write!(f, "Unhandled Surf Disco Error: {} - {}", status, msg) - } + }, Self::UnhandledTideDisco(status, msg) => { write!(f, "Unhandled Tide Disco Error: {} - {}", status, msg) - } + }, } } } @@ -255,7 +255,7 @@ where // let's queue up the next client message to receive next_client_message = socket_stream.next(); next_server_message = remaining_server_message; - } + }, Either::Right((server_message, remaining_client_message)) => { // Alright, we have a server message, we want to forward it // to the down-stream client. 
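// The socket pump above races the client and server sides and forwards
// whichever message arrives first. The real code uses futures::future::select
// with explicit Either arms; tokio::select! expresses the same race more
// compactly. A sketch assuming tokio; channels stand in for the sockets.

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (client_tx, mut client_rx) = mpsc::channel::<String>(8);
    let (server_tx, mut server_rx) = mpsc::channel::<String>(8);
    client_tx.send("subscribe".into()).await.unwrap();
    server_tx.send("block 1".into()).await.unwrap();
    server_tx.send("block 2".into()).await.unwrap();
    drop(client_tx);
    drop(server_tx);

    // Pump both directions until both sides hang up; each iteration handles
    // whichever side has a message ready first.
    loop {
        tokio::select! {
            Some(msg) = client_rx.recv() => println!("client -> server: {msg}"),
            Some(msg) = server_rx.recv() => println!("server -> client: {msg}"),
            else => break, // both channels closed and drained
        }
    }
}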
@@ -277,7 +277,7 @@ where // let's queue up the next server message to receive next_server_message = server_message_receiver.next(); next_client_message = remaining_client_message; - } + }, } } @@ -327,7 +327,7 @@ pub async fn get_stake_table_from_sequencer( Err(err) => { tracing::info!("retrieve stake table request failed: {}", err); return Err(err); - } + }, }; let public_hot_shot_config = sequencer_config.config; @@ -481,7 +481,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve block height request failed: {}", err); return Err(err); - } + }, }; let latest_block_start = block_height.saturating_sub(50); @@ -504,7 +504,7 @@ impl LeafStreamRetriever for HotshotQueryServiceLeafStreamRetriever { Err(err) => { tracing::info!("retrieve leaves stream failed: {}", err); return Err(err); - } + }, }; Ok(leaves_stream) @@ -621,7 +621,7 @@ impl ProcessProduceLeafStreamTask { delay = backoff_params.backoff(delay); sleep(delay).await; continue; - } + }, Ok(leaves_stream) => leaves_stream, }; @@ -795,7 +795,7 @@ pub fn populate_node_identity_from_scrape(node_identity: &mut NodeIdentity, scra // We couldn't parse the public key, so we can't create a NodeIdentity. tracing::info!("parsing public key failed: {}", err); return; - } + }, } } else { // We were unable to find the public key in the scrape result. @@ -878,7 +878,7 @@ pub fn node_identity_from_scrape(scrape: Scrape) -> Option { Err(err) => { tracing::info!("parsing public key failed: {}", err); return None; - } + }, }; let mut node_identity = NodeIdentity::from_public_key(public_key); @@ -937,7 +937,7 @@ impl ProcessNodeIdentityUrlStreamTask { None => { tracing::info!("node identity url stream closed"); return; - } + }, }; // Alright we have a new Url to try and scrape for a Node Identity. @@ -949,7 +949,7 @@ impl ProcessNodeIdentityUrlStreamTask { Err(err) => { tracing::warn!("get node identity from url failed. 
bad base url?: {}", err); continue; - } + }, }; let send_result = node_identity_sender.send(node_identity).await; diff --git a/node-metrics/src/lib.rs b/node-metrics/src/lib.rs index cc1d7c8e90..bfbad63039 100644 --- a/node-metrics/src/lib.rs +++ b/node-metrics/src/lib.rs @@ -233,10 +233,10 @@ pub async fn run_standalone_service(options: Options) { api::node_validator::v0::define_api().expect("error defining node validator api"); match app.register_module("node-validator", node_validator_api) { - Ok(_) => {} + Ok(_) => {}, Err(err) => { panic!("error registering node validator api: {:?}", err); - } + }, } let (leaf_sender, leaf_receiver) = mpsc::channel(10); @@ -260,7 +260,7 @@ pub async fn run_standalone_service(options: Options) { Err(err) => { panic!("error defining node validator api: {:?}", err); - } + }, }; let _cdn_tasks = if let Some(cdn_broker_url_string) = options.cdn_marshal_endpoint() { @@ -278,7 +278,7 @@ pub async fn run_standalone_service(options: Options) { Ok(cdn_network) => cdn_network, Err(err) => { panic!("error creating cdn network: {:?}", err); - } + }, }; let url_sender = node_validator_task_state.url_sender.clone(); diff --git a/node-metrics/src/service/client_message/mod.rs b/node-metrics/src/service/client_message/mod.rs index d19881430f..704335c8d1 100644 --- a/node-metrics/src/service/client_message/mod.rs +++ b/node-metrics/src/service/client_message/mod.rs @@ -141,7 +141,7 @@ mod tests { match internal_client_message { InternalClientMessage::Request(id, _) => { assert_eq!(id, client_id); - } + }, _ => panic!("Unexpected InternalClientMessage"), } } diff --git a/node-metrics/src/service/client_state/mod.rs b/node-metrics/src/service/client_state/mod.rs index 1ca1415d57..4574a3f932 100644 --- a/node-metrics/src/service/client_state/mod.rs +++ b/node-metrics/src/service/client_state/mod.rs @@ -112,7 +112,7 @@ impl std::fmt::Display for HandleConnectedError { match self { HandleConnectedError::ClientSendError(err) => { write!(f, "handle connected error: client send error: {}", err) - } + }, } } } @@ -235,7 +235,7 @@ impl std::fmt::Display for HandleRequestBlocksSnapshotsError { "handle request blocks snapshot error: client send error:: {}", err ) - } + }, } } } @@ -306,7 +306,7 @@ impl std::fmt::Display for HandleRequestNodeIdentitySnapshotError { "handle request node identity snapshot error: client send error: {}", err ) - } + }, } } } @@ -374,7 +374,7 @@ impl std::fmt::Display for HandleRequestHistogramSnapshotError { "handle request histogram snapshot error: client send error: {}", err ) - } + }, } } } @@ -461,7 +461,7 @@ impl std::fmt::Display for HandleRequestVotersSnapshotError { "handle request voters snapshot error: client send error: {}", err ) - } + }, } } } @@ -557,27 +557,27 @@ impl std::fmt::Display for ProcessClientMessageError { match self { ProcessClientMessageError::Connected(err) => { write!(f, "process client message error: connected: {}", err) - } + }, ProcessClientMessageError::BlocksSnapshot(err) => { write!(f, "process client message error: blocks snapshot: {}", err) - } + }, ProcessClientMessageError::NodeIdentitySnapshot(err) => { write!( f, "process client message error: node identity snapshot: {}", err ) - } + }, ProcessClientMessageError::HistogramSnapshot(err) => { write!( f, "process client message error: histogram snapshot: {}", err ) - } + }, ProcessClientMessageError::VotersSnapshot(err) => { write!(f, "process client message error: voters snapshot: {}", err) - } + }, } } } @@ -615,27 +615,27 @@ where 
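// The ProcessNodeIdentityUrlStreamTask hunks above pull URLs off a stream,
// tolerate individual scrape failures, and exit cleanly when either channel
// closes. A sketch of that worker shape, assuming tokio; `scrape_identity`
// is a hypothetical stand-in for the real metrics scrape.

use tokio::sync::mpsc;

// Hypothetical scrape: fails for URLs we cannot parse an identity out of.
fn scrape_identity(url: &str) -> Result<String, String> {
    url.strip_prefix("http://")
        .map(|host| format!("node@{host}"))
        .ok_or_else(|| format!("bad base url: {url}"))
}

#[tokio::main]
async fn main() {
    let (url_tx, mut url_rx) = mpsc::channel::<String>(8);
    let (id_tx, mut id_rx) = mpsc::channel::<String>(8);

    tokio::spawn(async move {
        // Ends cleanly when the URL stream closes.
        while let Some(url) = url_rx.recv().await {
            match scrape_identity(&url) {
                Ok(identity) => {
                    if id_tx.send(identity).await.is_err() {
                        break; // downstream hung up: nothing left to do
                    }
                },
                // A bad URL only costs us this one sample; keep consuming.
                Err(err) => eprintln!("scrape failed, skipping: {err}"),
            }
        }
    });

    url_tx.send("http://node-1:9090".into()).await.unwrap();
    url_tx.send("ftp://node-2".into()).await.unwrap();
    drop(url_tx);
    while let Some(id) = id_rx.recv().await {
        println!("identity: {id}");
    }
}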
InternalClientMessage::Connected(sender) => { handle_client_message_connected(sender, client_thread_state).await?; Ok(()) - } + }, InternalClientMessage::Disconnected(client_id) => { handle_client_message_disconnected(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeLatestBlock) => { handle_client_message_subscribe_latest_block(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeNodeIdentity) => { handle_client_message_subscribe_node_identity(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::SubscribeVoters) => { handle_client_message_subscribe_voters(client_id, client_thread_state).await; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestBlocksSnapshot) => { handle_client_message_request_blocks_snapshot( @@ -645,7 +645,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestNodeIdentitySnapshot) => { handle_client_message_request_node_identity_snapshot( @@ -655,7 +655,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestHistogramSnapshot) => { handle_client_message_request_histogram_snapshot( @@ -665,7 +665,7 @@ where ) .await?; Ok(()) - } + }, InternalClientMessage::Request(client_id, ClientMessage::RequestVotersSnapshot) => { handle_client_message_request_voters_snapshot( @@ -675,7 +675,7 @@ where ) .await?; Ok(()) - } + }, } } diff --git a/node-metrics/src/service/data_state/mod.rs b/node-metrics/src/service/data_state/mod.rs index ae820f4e5d..ead23271eb 100644 --- a/node-metrics/src/service/data_state/mod.rs +++ b/node-metrics/src/service/data_state/mod.rs @@ -200,10 +200,10 @@ impl std::fmt::Display for ProcessLeafError { match self { ProcessLeafError::BlockSendError(err) => { write!(f, "error sending block detail to sender: {}", err) - } + }, ProcessLeafError::VotersSendError(err) => { write!(f, "error sending voters to sender: {}", err) - } + }, } } } @@ -396,10 +396,10 @@ impl ProcessLeafStreamTask { match err { ProcessLeafError::BlockSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, blocks will stagnate: {}", err) - } + }, ProcessLeafError::VotersSendError(_) => { panic!("ProcessLeafStreamTask: process_incoming_leaf failed, underlying sink is closed, voters will stagnate: {}", err) - } + }, } } } @@ -429,7 +429,7 @@ impl std::fmt::Display for ProcessNodeIdentityError { match self { ProcessNodeIdentityError::SendError(err) => { write!(f, "error sending node identity to sender: {}", err) - } + }, } } } diff --git a/request-response/src/lib.rs b/request-response/src/lib.rs index d2cd58221b..211261cf94 100644 --- a/request-response/src/lib.rs +++ b/request-response/src/lib.rs @@ -406,24 +406,24 @@ impl< Err(e) => { warn!("Received invalid message: {e}"); continue; - } + }, }; // Handle the message based on its type match message { Message::Request(request_message) => { self.handle_request(request_message, &mut outgoing_responses); - } + }, Message::Response(response_message) => { self.handle_response(response_message, &mut incoming_responses); - } + }, } - } + }, // An error here means the receiver will _NEVER_ receive any more messages Err(e) => { error!("Request/response receive task exited: {e}"); return; - } + }, } } } diff --git a/request-response/src/message.rs b/request-response/src/message.rs index 
622be26f22..a704e1c454 100644 --- a/request-response/src/message.rs +++ b/request-response/src/message.rs @@ -140,14 +140,14 @@ impl Serializable for Message { // Write the request content bytes.extend_from_slice(request_message.to_bytes()?.as_slice()); - } + }, Message::Response(response_message) => { // Write the type (response) bytes.push(1); // Write the response content bytes.extend_from_slice(response_message.to_bytes()?.as_slice()); - } + }, }; Ok(bytes) @@ -168,13 +168,13 @@ impl Serializable for Message { Ok(Message::Request(RequestMessage::from_bytes(&read_to_end( &mut bytes, )?)?)) - } + }, 1 => { // Read the `ResponseMessage` Ok(Message::Response(ResponseMessage::from_bytes( &read_to_end(&mut bytes)?, )?)) - } + }, _ => Err(anyhow::anyhow!("invalid message type")), } } @@ -353,7 +353,7 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 2 => { // Alter the timestamp @@ -361,13 +361,13 @@ mod tests { // It should not be valid anymore (false, Duration::from_secs(1)) - } + }, 3 => { // Change the request ttl to be 0. This should make the request // invalid immediately (true, Duration::from_secs(0)) - } + }, _ => unreachable!(), }; diff --git a/sequencer/src/api.rs b/sequencer/src/api.rs index a8a6349d68..e0435f3729 100644 --- a/sequencer/src/api.rs +++ b/sequencer/src/api.rs @@ -335,7 +335,7 @@ impl< Ok(accounts) => return Ok(accounts), Err(err) => { tracing::info!("accounts not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -372,7 +372,7 @@ impl< } (Arc::new(state), delta.clone()) - } + }, _ => { // If we don't already have a leaf for this view, or if we don't have the view // at all, we can create a new view based on the recovered leaf and add it to @@ -381,7 +381,7 @@ impl< let mut state = ValidatedState::from_header(leaf.block_header()); state.fee_merkle_tree = tree.clone(); (Arc::new(state), None) - } + }, }; if let Err(err) = consensus.update_leaf(leaf, Arc::clone(&state), delta) { tracing::warn!(?view, "cannot update fetched account state: {err:#}"); @@ -403,7 +403,7 @@ impl< Ok(frontier) => return Ok(frontier), Err(err) => { tracing::info!("frontier is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -419,7 +419,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -431,7 +431,7 @@ impl< Ok(cf) => return Ok(cf), Err(err) => { tracing::info!("chain config is not in memory, trying storage: {err:#}"); - } + }, } // Try storage. @@ -2503,7 +2503,7 @@ mod test { let new_version = upgrade.new_version; assert_eq!(new_version, ::Upgrade::VERSION); break upgrade.new_version_first_view; - } + }, _ => continue, } }; diff --git a/sequencer/src/api/sql.rs b/sequencer/src/api/sql.rs index b19fdc83f1..e703b43dde 100644 --- a/sequencer/src/api/sql.rs +++ b/sequencer/src/api/sql.rs @@ -145,7 +145,7 @@ impl CatchupStorage for SqlStorage { LookupResult::Ok(_, proof) => Ok(proof), _ => { bail!("state snapshot {view:?},{height} was found but does not contain frontier at height {}; this should not be possible", height - 1); - } + }, } } } @@ -275,13 +275,13 @@ async fn load_accounts( ))? { MerkleNode::Leaf { pos, elem, .. } => { snapshot.remember(*pos, *elem, proof)?; - } + }, MerkleNode::Empty => { snapshot.non_membership_remember(*account, proof)?; - } + }, _ => { bail!("Invalid proof"); - } + }, } } @@ -442,7 +442,7 @@ async fn header_dependencies( // so the STF will be able to look it up later. 
catchup.add_chain_config(cf); cf - } + }, } }; diff --git a/sequencer/src/bin/espresso-bridge.rs b/sequencer/src/bin/espresso-bridge.rs index 2382ba5b38..e036753e34 100644 --- a/sequencer/src/bin/espresso-bridge.rs +++ b/sequencer/src/bin/espresso-bridge.rs @@ -213,7 +213,7 @@ async fn deposit(opt: Deposit) -> anyhow::Result<()> { Err(err) => { tracing::warn!("error in header stream: {err:#}"); continue; - } + }, }; let Some(l1_finalized) = header.l1_finalized() else { continue; diff --git a/sequencer/src/bin/keygen.rs b/sequencer/src/bin/keygen.rs index 188179f982..f4ac7d7d83 100644 --- a/sequencer/src/bin/keygen.rs +++ b/sequencer/src/bin/keygen.rs @@ -33,7 +33,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -43,7 +43,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -54,7 +54,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/nasty-client.rs b/sequencer/src/bin/nasty-client.rs index b3741582d8..d981c2fa1f 100644 --- a/sequencer/src/bin/nasty-client.rs +++ b/sequencer/src/bin/nasty-client.rs @@ -520,7 +520,7 @@ impl ResourceManager { Ok(res) if i == 0 => { // Succeeded on the first try, get on with it. return Ok(res); - } + }, Ok(res) => { // Succeeded after at least one failure; retry a number of additional times to // be sure the endpoint is healed. @@ -531,14 +531,14 @@ impl ResourceManager { )?; } return Ok(res); - } + }, Err(err) if i < self.cfg.max_retries => { tracing::warn!("failed, will retry: {err:#}"); i += 1; - } + }, Err(err) => { return Err(err).context("failed too many times"); - } + }, } } } @@ -674,7 +674,7 @@ impl ResourceManager { obj.height() ); } - } + }, Err(_) if to - from > limit => { tracing::info!( limit, @@ -682,10 +682,10 @@ impl ResourceManager { to, "range query exceeding limit failed as expected" ); - } + }, Err(err) => { return Err(err).context("error in range query"); - } + }, } self.metrics.query_range_actions[&T::RESOURCE].add(1); @@ -800,7 +800,7 @@ impl ResourceManager { ); } break obj; - } + }, Err(err) if refreshed.elapsed() >= self.cfg.web_socket_timeout => { // Streams are allowed to fail if the connection is too old. Warn about it, // but refresh the connection and try again. @@ -818,7 +818,7 @@ impl ResourceManager { "{} stream refreshed due to connection reset", Self::singular(), ); - } + }, Err(err) => { // Errors on a relatively fresh connection are not allowed. Close the stream // since it is apparently in a bad state, and return an error. @@ -830,7 +830,7 @@ impl ResourceManager { Self::singular(), refreshed.elapsed() )); - } + }, } }; @@ -944,11 +944,11 @@ impl ResourceManager
{ // The block state at height 0 is empty, so to have a valid query just adjust to // querying at height 1. At height 1, the only valid index to query is 0. (1, 0) - } + }, block => { // At any other height, all indices between 0 and `block - 1` are valid to query. (block, index % (block - 1)) - } + }, }; // Get the header of the state snapshot we're going to query and the block commitment we're @@ -1344,7 +1344,7 @@ impl Client { Resource::Payloads => self.payloads.close_stream(id).await, }; Ok(()) - } + }, Action::PollStream { resource, id, @@ -1357,16 +1357,16 @@ impl Client { }, Action::QueryWindow { from, duration } => { self.headers.query_window(from, duration).await - } + }, Action::QueryNamespace { block, namespace } => { self.blocks.query_namespace(block, namespace).await - } + }, Action::QueryBlockState { block, index } => { self.headers.query_block_state(block, index).await - } + }, Action::QueryFeeState { block, builder } => { self.headers.query_fee_state(block, builder).await - } + }, } } } diff --git a/sequencer/src/bin/pub-key.rs b/sequencer/src/bin/pub-key.rs index 0c2bbc99cc..38efb49a86 100644 --- a/sequencer/src/bin/pub-key.rs +++ b/sequencer/src/bin/pub-key.rs @@ -49,7 +49,7 @@ fn main() { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -57,9 +57,9 @@ fn main() { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/reset-storage.rs b/sequencer/src/bin/reset-storage.rs index a635cda1b7..88d83fd1fd 100644 --- a/sequencer/src/bin/reset-storage.rs +++ b/sequencer/src/bin/reset-storage.rs @@ -35,11 +35,11 @@ async fn main() -> anyhow::Result<()> { Command::Fs(opt) => { tracing::warn!("resetting file system storage {opt:?}"); reset_storage(opt).await - } + }, Command::Sql(opt) => { tracing::warn!("resetting SQL storage {opt:?}"); reset_storage(*opt).await - } + }, } } diff --git a/sequencer/src/bin/submit-transactions.rs b/sequencer/src/bin/submit-transactions.rs index 87795b4761..03102a4bd9 100644 --- a/sequencer/src/bin/submit-transactions.rs +++ b/sequencer/src/bin/submit-transactions.rs @@ -229,7 +229,7 @@ async fn main() { Err(err) => { tracing::warn!("error getting block: {err}"); continue; - } + }, }; let received_at = Instant::now(); tracing::debug!("got block {}", block.height()); diff --git a/sequencer/src/bin/update-permissioned-stake-table.rs b/sequencer/src/bin/update-permissioned-stake-table.rs index a966a30f22..5beda27335 100644 --- a/sequencer/src/bin/update-permissioned-stake-table.rs +++ b/sequencer/src/bin/update-permissioned-stake-table.rs @@ -100,7 +100,7 @@ async fn main() -> Result<()> { Some(path) => { tracing::error!("updating stake table from path: {path:?}"); update = Some(PermissionedStakeTableUpdate::from_toml_file(&path)?); - } + }, None => { let peers = opts.state_peers.context("No state peers found")?; let clients: Vec = @@ -127,13 +127,13 @@ async fn main() -> Result<()> { update = Some(PermissionedStakeTableUpdate::new(new_stakers, Vec::new())); break; - } + }, Err(e) => { tracing::warn!("Failed to fetch config from sequencer: {e}"); - } + }, }; } - } + }, } update_stake_table( diff --git a/sequencer/src/bin/utils/keygen.rs b/sequencer/src/bin/utils/keygen.rs index a381dc0d25..5240493f16 100644 --- 
a/sequencer/src/bin/utils/keygen.rs +++ b/sequencer/src/bin/utils/keygen.rs @@ -32,7 +32,7 @@ impl Scheme { Self::All => { Self::Bls.gen(seed, index, env_file)?; Self::Schnorr.gen(seed, index, env_file)?; - } + }, Self::Bls => { let (pub_key, priv_key) = BLSPubKey::generated_from_seed_indexed(seed, index); let priv_key = priv_key.to_tagged_base64()?; @@ -42,7 +42,7 @@ impl Scheme { "ESPRESSO_SEQUENCER_PRIVATE_STAKING_KEY={priv_key}" )?; tracing::info!(%pub_key, "generated staking key") - } + }, Self::Schnorr => { let key_pair = StateKeyPair::generate_from_seed_indexed(seed, index); let priv_key = key_pair.sign_key_ref().to_tagged_base64()?; @@ -53,7 +53,7 @@ impl Scheme { )?; writeln!(env_file, "ESPRESSO_SEQUENCER_PRIVATE_STATE_KEY={priv_key}")?; tracing::info!(pub_key = %key_pair.ver_key(), "generated state key"); - } + }, } Ok(()) } diff --git a/sequencer/src/bin/utils/main.rs b/sequencer/src/bin/utils/main.rs index 97ac6486ba..d2636be6ff 100644 --- a/sequencer/src/bin/utils/main.rs +++ b/sequencer/src/bin/utils/main.rs @@ -34,7 +34,7 @@ async fn main() -> anyhow::Result<()> { Command::Pubkey(opt) => { pubkey::run(opt); Ok(()) - } + }, Command::ResetStorage(opt) => reset_storage::run(opt).await, } } diff --git a/sequencer/src/bin/utils/pubkey.rs b/sequencer/src/bin/utils/pubkey.rs index 84b65c5042..83b91f1b95 100644 --- a/sequencer/src/bin/utils/pubkey.rs +++ b/sequencer/src/bin/utils/pubkey.rs @@ -47,7 +47,7 @@ pub fn run(opt: Options) { (false, PrivateKey::Bls(key)) => println!("{}", PubKey::from_private(&key)), (false, PrivateKey::Schnorr(key)) => { println!("{}", StateKeyPair::from_sign_key(key).ver_key()) - } + }, // Libp2p (true, PrivateKey::Bls(key)) => { @@ -55,9 +55,9 @@ pub fn run(opt: Options) { "{}", derive_libp2p_peer_id::(&key).expect("Failed to derive libp2p peer ID") ); - } + }, (true, _) => { eprintln!("Key type unsupported for libp2p peer ID derivation"); - } + }, } } diff --git a/sequencer/src/bin/utils/reset_storage.rs b/sequencer/src/bin/utils/reset_storage.rs index b25c8a3c11..7e1e8e080c 100644 --- a/sequencer/src/bin/utils/reset_storage.rs +++ b/sequencer/src/bin/utils/reset_storage.rs @@ -32,11 +32,11 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { SequencerStorage::Fs(opt) => { tracing::warn!("resetting sequencer file system storage {opt:?}"); reset_storage(opt).await - } + }, SequencerStorage::Sql(opt) => { tracing::warn!("resetting sequencer SQL storage {opt:?}"); reset_storage(*opt).await - } + }, }, Commands::Solver(opt) => { @@ -45,7 +45,7 @@ pub async fn run(opt: Commands) -> anyhow::Result<()> { opts.connect().await?; Ok(()) - } + }, } } diff --git a/sequencer/src/bin/verify-headers.rs b/sequencer/src/bin/verify-headers.rs index 4b999070a3..c86a037524 100644 --- a/sequencer/src/bin/verify-headers.rs +++ b/sequencer/src/bin/verify-headers.rs @@ -134,7 +134,7 @@ async fn get_header( // Back off a bit and then retry. 
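// The verify-headers fetch above retries indefinitely with a short fixed
// delay, on the theory that the resource will eventually appear. A std-only
// sketch of that shape; `get_header` is a hypothetical stand-in.

use std::time::Duration;

// Hypothetical fetch that succeeds only once its dependency shows up.
fn get_header(height: u64, available_from: u64, attempt: u32) -> Option<u64> {
    (u64::from(attempt) >= available_from).then_some(height)
}

fn main() {
    let height = 42;
    let mut attempt = 0;
    let header = loop {
        if let Some(h) = get_header(height, 3, attempt) {
            break h;
        }
        // Back off briefly so we don't hammer the server while it catches up.
        std::thread::sleep(Duration::from_millis(100));
        attempt += 1;
    };
    println!("got header for block {header}");
}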
sleep(Duration::from_millis(100)).await; - } + }, } } } @@ -147,12 +147,12 @@ async fn get_l1_block(l1: &Provider, height: u64) -> L1BlockInfo { tracing::warn!("L1 block {height} not yet available"); sleep(Duration::from_secs(1)).await; continue; - } + }, Err(err) => { tracing::warn!("error fetching L1 block {height}: {err}"); sleep(Duration::from_millis(100)).await; continue; - } + }, }; let Some(hash) = block.hash else { diff --git a/sequencer/src/catchup.rs b/sequencer/src/catchup.rs index facfb02bf7..b80b7f89a9 100644 --- a/sequencer/src/catchup.rs +++ b/sequencer/src/catchup.rs @@ -75,7 +75,7 @@ pub(crate) async fn local_and_remote( Err(err) => { tracing::warn!("not using local catchup: {err:#}"); Arc::new(remote) - } + }, } } @@ -164,15 +164,15 @@ impl StatePeers { requests.insert(id, true); res = Ok(t); break; - } + }, Ok(Err(err)) => { tracing::warn!(id, ?score, peer = %client.url, "error from peer: {err:#}"); requests.insert(id, false); - } + }, Err(_) => { tracing::warn!(id, ?score, peer = %client.url, ?timeout_dur, "request timed out"); requests.insert(id, false); - } + }, } } diff --git a/sequencer/src/external_event_handler.rs b/sequencer/src/external_event_handler.rs index 86659f946f..917dc89d33 100644 --- a/sequencer/src/external_event_handler.rs +++ b/sequencer/src/external_event_handler.rs @@ -83,7 +83,7 @@ impl ExternalEventHandler { self.request_response_sender .send(request_response.into()) .await?; - } + }, } Ok(()) } @@ -111,14 +111,14 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize direct message: {}", err); continue; - } + }, }; // Send the message to the recipient if let Err(err) = network.direct_message(message_bytes, recipient).await { tracing::error!("Failed to send message: {:?}", err); }; - } + }, OutboundMessage::Broadcast(message) => { // Wrap it in the real message type @@ -133,7 +133,7 @@ impl ExternalEventHandler { Err(err) => { tracing::warn!("Failed to serialize broadcast message: {}", err); continue; - } + }, }; // Broadcast the message to the global topic @@ -143,7 +143,7 @@ impl ExternalEventHandler { { tracing::error!("Failed to broadcast message: {:?}", err); }; - } + }, } } } diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index 790a8d6193..58a7d4f9b2 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -257,12 +257,12 @@ mod upgrade_ser { return Err(de::Error::custom( "both view and time mode parameters are set", )) - } + }, (None, None) => { return Err(de::Error::custom( "no view or time mode parameters provided", )) - } + }, (None, Some(v)) => { if v.start_proposing_view > v.stop_proposing_view { return Err(de::Error::custom( @@ -277,7 +277,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type, }, ); - } + }, (Some(t), None) => { if t.start_proposing_time.unix_timestamp() > t.stop_proposing_time.unix_timestamp() @@ -294,7 +294,7 @@ mod upgrade_ser { upgrade_type: fields.upgrade_type.clone(), }, ); - } + }, } } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 2b2ed2947a..30d185b986 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -312,7 +312,7 @@ pub async fn init_node( (Some(config), _) => { tracing::info!("loaded network config from storage, rejoining existing network"); (config, false) - } + }, // If we were told to fetch the config from an already-started peer, do so. 
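// The StatePeers hunks above try each peer in turn, record per-peer success
// or failure for future scoring, and return the first good answer. A minimal
// std-only sketch of that bookkeeping; `fetch_from` is hypothetical.

use std::collections::HashMap;

// Hypothetical peer call: peer "a" is down, the others answer.
fn fetch_from(peer: &str) -> Result<String, String> {
    if peer == "a" {
        Err("connection refused".into())
    } else {
        Ok(format!("state from peer {peer}"))
    }
}

fn main() {
    let peers = ["a", "b", "c"];
    // Track which peers served us well, so future requests can prefer them.
    let mut scorecard: HashMap<&str, bool> = HashMap::new();
    let mut result: Result<String, String> = Err("all peers failed".to_string());
    for peer in peers {
        match fetch_from(peer) {
            Ok(state) => {
                scorecard.insert(peer, true);
                result = Ok(state);
                break; // first successful peer wins
            },
            Err(err) => {
                eprintln!("error from peer {peer}: {err}");
                scorecard.insert(peer, false);
            },
        }
    }
    println!("{result:?} scorecard: {scorecard:?}");
}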
(None, Some(peers)) => { tracing::info!(?peers, "loading network config from peers"); @@ -330,7 +330,7 @@ pub async fn init_node( ); persistence.save_config(&config).await?; (config, false) - } + }, // Otherwise, this is a fresh network; load from the orchestrator. (None, None) => { tracing::info!("loading network config from orchestrator"); @@ -356,7 +356,7 @@ pub async fn init_node( persistence.save_config(&config).await?; tracing::error!("all nodes connected"); (config, true) - } + }, }; if let Some(upgrade) = genesis.upgrades.get(&V::Upgrade::VERSION) { @@ -455,7 +455,7 @@ pub async fn init_node( ethers::types::U256::from(timestamp.unix_timestamp()).to_alloy(), ) .await - } + }, }; let mut genesis_state = ValidatedState { diff --git a/sequencer/src/options.rs b/sequencer/src/options.rs index 242707e23e..696cf7bf3f 100755 --- a/sequencer/src/options.rs +++ b/sequencer/src/options.rs @@ -472,10 +472,10 @@ fn fmt_opt_urls( write!(fmt, "Some(")?; fmt_urls(urls, fmt)?; write!(fmt, ")")?; - } + }, None => { write!(fmt, "None")?; - } + }, } Ok(()) } @@ -536,13 +536,13 @@ impl ModuleArgs { match module { SequencerModule::Storage(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageFs(m) => { curr = m.add(&mut modules.storage_fs, &mut provided)? - } + }, SequencerModule::StorageSql(m) => { curr = m.add(&mut modules.storage_sql, &mut provided)? - } + }, SequencerModule::Http(m) => curr = m.add(&mut modules.http, &mut provided)?, SequencerModule::Query(m) => curr = m.add(&mut modules.query, &mut provided)?, SequencerModule::Submit(m) => curr = m.add(&mut modules.submit, &mut provided)?, @@ -551,10 +551,10 @@ impl ModuleArgs { SequencerModule::Config(m) => curr = m.add(&mut modules.config, &mut provided)?, SequencerModule::HotshotEvents(m) => { curr = m.add(&mut modules.hotshot_events, &mut provided)? - } + }, SequencerModule::Explorer(m) => { curr = m.add(&mut modules.explorer, &mut provided)? - } + }, } } diff --git a/sequencer/src/persistence/fs.rs b/sequencer/src/persistence/fs.rs index 853142fa9a..8d44aa0f15 100644 --- a/sequencer/src/persistence/fs.rs +++ b/sequencer/src/persistence/fs.rs @@ -596,7 +596,7 @@ impl SequencerPersistence for Persistence { // managed to persist the decided leaves successfully, and the event processing will // just run again at the next decide. tracing::warn!(?view, "event processing failed: {err:#}"); - } + }, Ok(intervals) => { if let Err(err) = inner.collect_garbage(view, &intervals) { // Similarly, garbage collection is not an error. We have done everything we @@ -604,7 +604,7 @@ impl SequencerPersistence for Persistence { // error but do not return it. tracing::warn!(?view, "GC failed: {err:#}"); } - } + }, } Ok(()) @@ -836,7 +836,7 @@ impl SequencerPersistence for Persistence { // some unintended file whose name happened to match the naming convention. tracing::warn!(?view, "ignoring malformed quorum proposal file: {err:#}"); continue; - } + }, }; let proposal2 = convert_proposal(proposal); diff --git a/sequencer/src/persistence/sql.rs b/sequencer/src/persistence/sql.rs index e2383a81ca..a6387b6670 100644 --- a/sequencer/src/persistence/sql.rs +++ b/sequencer/src/persistence/sql.rs @@ -661,7 +661,7 @@ impl Persistence { // we do have. 
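// The init_node hunks above resolve the network config in a fixed order:
// saved config beats peers, peers beat the orchestrator, and only the
// orchestrator path marks the network as brand new. A std-only sketch of
// that fallback chain; the three sources here are hypothetical stubs.

fn from_storage() -> Option<String> { None }
fn from_peers() -> Option<String> { None }
fn from_orchestrator() -> String { "fresh network config".into() }

fn main() {
    let (config, fresh) = match (from_storage(), from_peers()) {
        (Some(cfg), _) => (cfg, false),               // rejoin with what we persisted
        (None, Some(cfg)) => (cfg, false),            // catch up from a running peer
        (None, None) => (from_orchestrator(), true),  // genesis: everyone starts together
    };
    println!("config = {config:?}, fresh = {fresh}");
}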
tracing::warn!("error loading row: {err:#}"); break; - } + }, }; let leaf_data: Vec = row.get("leaf"); @@ -1884,7 +1884,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -1899,7 +1899,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error loading VID share: {err:#}"); return None; - } + }, }; let share: Proposal> = @@ -1908,7 +1908,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error decoding VID share: {err:#}"); return None; - } + }, }; match share.data { @@ -1928,7 +1928,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let bytes = match query_as::<(Vec,)>( @@ -1943,7 +1943,7 @@ impl Provider for Persistence { Err(err) => { tracing::warn!("error loading DA proposal: {err:#}"); return None; - } + }, }; let proposal: Proposal> = match bincode::deserialize(&bytes) @@ -1952,7 +1952,7 @@ impl Provider for Persistence { Err(err) => { tracing::error!("error decoding DA proposal: {err:#}"); return None; - } + }, }; Some(Payload::from_bytes( @@ -1971,7 +1971,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("could not open transaction: {err:#}"); return None; - } + }, }; let (leaf, qc) = match fetch_leaf_from_proposals(&mut tx, req).await { @@ -1979,7 +1979,7 @@ impl Provider> for Persistence { Err(err) => { tracing::info!("requested leaf not found in undecided proposals: {err:#}"); return None; - } + }, }; match LeafQueryData::new(leaf, qc) { @@ -1987,7 +1987,7 @@ impl Provider> for Persistence { Err(err) => { tracing::warn!("fetched invalid leaf: {err:#}"); None - } + }, } } } diff --git a/sequencer/src/proposal_fetcher.rs b/sequencer/src/proposal_fetcher.rs index a5d143188a..1b26dca330 100644 --- a/sequencer/src/proposal_fetcher.rs +++ b/sequencer/src/proposal_fetcher.rs @@ -184,10 +184,10 @@ where let leaf = proposal.data.justify_qc().data.leaf_commit; self.request((view, leaf)).await; return Ok(()); - } + }, Err(err) => { tracing::info!("proposal missing from storage; fetching from network: {err:#}"); - } + }, } let future = self.consensus.read().await.request_proposal(view, leaf)?; diff --git a/sequencer/src/request_response/recipient_source.rs b/sequencer/src/request_response/recipient_source.rs index a0dcbbd69d..ce9b5819d5 100644 --- a/sequencer/src/request_response/recipient_source.rs +++ b/sequencer/src/request_response/recipient_source.rs @@ -38,7 +38,7 @@ impl RecipientSourceTrait for RecipientSource { .iter() .map(|entry| entry.stake_table_entry.stake_key) .collect() - } + }, } } } diff --git a/sequencer/src/restart_tests.rs b/sequencer/src/restart_tests.rs index 4c6c5a82c5..62a79ac1f0 100755 --- a/sequencer/src/restart_tests.rs +++ b/sequencer/src/restart_tests.rs @@ -358,7 +358,7 @@ impl TestNode { sleep(delay).await; delay *= 2; retries -= 1; - } + }, } }; diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index dd42127d26..277190ff6c 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -235,7 +235,7 @@ where .boxed() }) .await? - } + }, None => { init_node( genesis, @@ -251,7 +251,7 @@ where proposal_fetcher_config, ) .await? 
- } + }, }; Ok(ctx) diff --git a/sequencer/src/state.rs b/sequencer/src/state.rs index e7f9160e41..884674ab94 100644 --- a/sequencer/src/state.rs +++ b/sequencer/src/state.rs @@ -300,12 +300,12 @@ where parent_leaf = leaf; parent_state = state; break; - } + }, Err(err) => { tracing::error!(height = leaf.height(), "failed to updated state: {err:#}"); // If we fail, delay for a second and retry. sleep(Duration::from_secs(1)).await; - } + }, } } } diff --git a/sequencer/src/state_signature.rs b/sequencer/src/state_signature.rs index 87ff5b1761..9eeb1bf798 100644 --- a/sequencer/src/state_signature.rs +++ b/sequencer/src/state_signature.rs @@ -97,10 +97,10 @@ impl StateSigner { tracing::warn!("Error posting signature to the relay server: {:?}", error); } } - } + }, Err(err) => { tracing::error!("Error generating light client state: {:?}", err) - } + }, } } diff --git a/sequencer/src/state_signature/relay_server.rs b/sequencer/src/state_signature/relay_server.rs index fcfda46374..c718d8a185 100644 --- a/sequencer/src/state_signature/relay_server.rs +++ b/sequencer/src/state_signature/relay_server.rs @@ -149,11 +149,11 @@ impl StateRelayServerDataSource for StateRelayServerState { StatusCode::BAD_REQUEST, "A signature of this light client state is already posted at this block height for this key.".to_owned(), )); - } + }, std::collections::hash_map::Entry::Vacant(entry) => { entry.insert(signature); bundle.accumulated_weight += *weight; - } + }, } if bundle.accumulated_weight >= self.threshold { @@ -204,7 +204,7 @@ where reason: err.to_string(), })?; Api::::new(toml)? - } + }, }; api.get("getlateststate", |_req, state| { diff --git a/types/src/v0/impls/block/full_payload/ns_proof.rs b/types/src/v0/impls/block/full_payload/ns_proof.rs index 837e81885e..2b8eeaf567 100644 --- a/types/src/v0/impls/block/full_payload/ns_proof.rs +++ b/types/src/v0/impls/block/full_payload/ns_proof.rs @@ -109,25 +109,25 @@ impl NsProof { ) .ok()? 
// error: internal to payload_verify() .ok()?; // verification failure - } - (None, true) => {} // 0-length namespace, nothing to verify + }, + (None, true) => {}, // 0-length namespace, nothing to verify (None, false) => { tracing::error!( "ns verify: missing proof for nonempty ns payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("ns verify: unexpected proof for empty ns payload range"); return None; - } + }, } // verification succeeded, return some data let ns_id = ns_table.read_ns_id_unchecked(&self.ns_index); Some((self.ns_payload.export_all_txs(&ns_id), ns_id)) - } + }, VidCommitment::V1(_) => None, } } diff --git a/types/src/v0/impls/block/full_payload/payload.rs b/types/src/v0/impls/block/full_payload/payload.rs index 3707ab3a57..fc205a00fe 100644 --- a/types/src/v0/impls/block/full_payload/payload.rs +++ b/types/src/v0/impls/block/full_payload/payload.rs @@ -281,7 +281,7 @@ impl PayloadByteLen { ADVZScheme::get_payload_byte_len(common) ); return false; - } + }, }; self.0 == expected diff --git a/types/src/v0/impls/block/namespace_payload/tx_proof.rs b/types/src/v0/impls/block/namespace_payload/tx_proof.rs index 5c2026088a..370e9da08b 100644 --- a/types/src/v0/impls/block/namespace_payload/tx_proof.rs +++ b/types/src/v0/impls/block/namespace_payload/tx_proof.rs @@ -199,19 +199,19 @@ impl TxProof { { return Some(false); } - } - (None, true) => {} // 0-length tx, nothing to verify + }, + (None, true) => {}, // 0-length tx, nothing to verify (None, false) => { tracing::error!( "tx verify: missing proof for nonempty tx payload range {:?}", range ); return None; - } + }, (Some(_), true) => { tracing::error!("tx verify: unexpected proof for empty tx payload range"); return None; - } + }, } } diff --git a/types/src/v0/impls/fee_info.rs b/types/src/v0/impls/fee_info.rs index d1d61dd42e..3316abce61 100644 --- a/types/src/v0/impls/fee_info.rs +++ b/types/src/v0/impls/fee_info.rs @@ -390,7 +390,7 @@ impl FeeAccountProof { .elem() .context("presence proof is missing account balance")? .0) - } + }, FeeMerkleProof::Absence(proof) => { let tree = FeeMerkleTree::from_commitment(comm); ensure!( @@ -398,7 +398,7 @@ impl FeeAccountProof { "invalid proof" ); Ok(0.into()) - } + }, } } @@ -413,11 +413,11 @@ impl FeeAccountProof { proof, )?; Ok(()) - } + }, FeeMerkleProof::Absence(proof) => { tree.non_membership_remember(FeeAccount(self.account), proof)?; Ok(()) - } + }, } } } @@ -442,14 +442,14 @@ pub fn retain_accounts( // This remember cannot fail, since we just constructed a valid proof, and are // remembering into a tree with the same commitment. snapshot.remember(account, *elem, proof).unwrap(); - } + }, LookupResult::NotFound(proof) => { // Likewise this cannot fail. 
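// The NsProof/TxProof hunks above distinguish three outcomes with the same
// four-way match: Some(bool) for a proof that was actually checked, a silent
// pass for an empty range, and None for structurally malformed input. A
// std-only sketch of that dispatch; the "check" here is a stub.

// A proof must be present exactly when the range is nonempty.
fn verify(proof: Option<&[u8]>, range: std::ops::Range<usize>) -> Option<bool> {
    match (proof, range.is_empty()) {
        // Nonempty range with a proof: actually check it (stubbed here).
        (Some(p), false) => Some(!p.is_empty()),
        // Empty range carries no data, so there is nothing to verify.
        (None, true) => Some(true),
        // Nonempty range but no proof: the input is malformed, not merely invalid.
        (None, false) => {
            eprintln!("missing proof for nonempty payload range {range:?}");
            None
        },
        // Proof supplied for an empty range: also malformed.
        (Some(_), true) => {
            eprintln!("unexpected proof for empty payload range");
            None
        },
    }
}

fn main() {
    assert_eq!(verify(Some(b"proof".as_slice()), 0..4), Some(true));
    assert_eq!(verify(None, 4..4), Some(true));
    assert_eq!(verify(None, 0..4), None);
    assert_eq!(verify(Some(b"proof".as_slice()), 4..4), None);
    println!("all four cases behave as expected");
}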
snapshot.non_membership_remember(account, proof).unwrap() - } + }, LookupResult::NotInMemory => { bail!("missing account {account}"); - } + }, } } diff --git a/types/src/v0/impls/header.rs b/types/src/v0/impls/header.rs index 2a0235a221..386c74bf54 100644 --- a/types/src/v0/impls/header.rs +++ b/types/src/v0/impls/header.rs @@ -174,7 +174,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(serde::de::Error::custom(format!("invalid version {v:?}"))) - } + }, } } @@ -211,7 +211,7 @@ impl<'de> Deserialize<'de> for Header { )), EitherOrVersion::Version(v) => { Err(de::Error::custom(format!("invalid version {v:?}"))) - } + }, chain_config => Err(de::Error::custom(format!( "expected version, found chain_config {chain_config:?}" ))), @@ -604,7 +604,7 @@ impl Header { .as_ref() .fetch_chain_config(validated_cf.commit()) .await - } + }, } } } diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index 5f886269d4..11cd6b7ee9 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -195,7 +195,7 @@ impl Upgrade { config.stop_proposing_time = u64::MAX; config.start_voting_time = 0; config.stop_voting_time = u64::MAX; - } + }, UpgradeMode::Time(t) => { config.start_proposing_time = t.start_proposing_time.unix_timestamp(); config.stop_proposing_time = t.stop_proposing_time.unix_timestamp(); @@ -208,7 +208,7 @@ impl Upgrade { config.stop_proposing_view = u64::MAX; config.start_voting_view = 0; config.stop_voting_view = u64::MAX; - } + }, } } } diff --git a/types/src/v0/impls/l1.rs b/types/src/v0/impls/l1.rs index 9661dd47ec..722131b7f9 100644 --- a/types/src/v0/impls/l1.rs +++ b/types/src/v0/impls/l1.rs @@ -312,7 +312,7 @@ impl Service for SwitchingTransport { // If it's okay, log the success to the status current_transport.status.write().log_success(); Ok(res) - } + }, Err(err) => { // Increment the failure metric if let Some(f) = self_clone @@ -364,7 +364,7 @@ impl Service for SwitchingTransport { } Err(err) - } + }, } }) } @@ -737,12 +737,12 @@ impl L1Client { ); self.retry_delay().await; continue; - } + }, Err(err) => { tracing::warn!(number, "failed to get finalized L1 block: {err:#}"); self.retry_delay().await; continue; - } + }, }; break L1BlockInfo { number: block.header.number, @@ -815,7 +815,7 @@ impl L1Client { Err(err) => { tracing::warn!(from, to, %err, "Fee L1Event Error"); sleep(retry_delay).await; - } + }, } } } diff --git a/types/src/v0/impls/solver.rs b/types/src/v0/impls/solver.rs index e16fbc7f9f..fd7566b79c 100644 --- a/types/src/v0/impls/solver.rs +++ b/types/src/v0/impls/solver.rs @@ -54,7 +54,7 @@ impl Committable for RollupUpdatebody { comm = comm .u64_field("reserve_url", 2) .var_size_bytes(url.as_str().as_ref()) - } + }, Set(None) => comm = comm.u64_field("reserve_url", 1), Update::Skip => comm = comm.u64_field("reserve_url", 0), } diff --git a/types/src/v0/impls/state.rs b/types/src/v0/impls/state.rs index 030866dac1..d7803dd0e2 100644 --- a/types/src/v0/impls/state.rs +++ b/types/src/v0/impls/state.rs @@ -1118,7 +1118,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.next()` on unimplemented version (v3)") - } + }, } } /// Replaces builder signature w/ invalid one. 
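// The instance_state hunk above applies one rule: whichever upgrade mode is
// active gets real bounds, and the other dimension is disabled by widening
// its window to [0, u64::MAX]. A std-only sketch with simplified stand-in
// types for UpgradeMode and the HotShot config.

enum UpgradeMode {
    View { start: u64, stop: u64 },
    Time { start: u64, stop: u64 },
}

#[derive(Debug, Default)]
struct Config {
    start_proposing_view: u64,
    stop_proposing_view: u64,
    start_proposing_time: u64,
    stop_proposing_time: u64,
}

fn apply(mode: &UpgradeMode, config: &mut Config) {
    match mode {
        UpgradeMode::View { start, stop } => {
            config.start_proposing_view = *start;
            config.stop_proposing_view = *stop;
            // Time-based gating becomes a no-op: every timestamp is in the window.
            config.start_proposing_time = 0;
            config.stop_proposing_time = u64::MAX;
        },
        UpgradeMode::Time { start, stop } => {
            config.start_proposing_time = *start;
            config.stop_proposing_time = *stop;
            // View-based gating becomes a no-op: every view is in the window.
            config.start_proposing_view = 0;
            config.stop_proposing_view = u64::MAX;
        },
    }
}

fn main() {
    let mut config = Config::default();
    apply(&UpgradeMode::View { start: 100, stop: 200 }, &mut config);
    println!("{config:?}");
}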
@@ -1147,7 +1147,7 @@ mod test { }), Header::V99(_) => { panic!("You called `Header.sign()` on unimplemented version (v3)") - } + }, } } diff --git a/types/src/v0/traits.rs b/types/src/v0/traits.rs index c578f1ecca..08bc05a293 100644 --- a/types/src/v0/traits.rs +++ b/types/src/v0/traits.rs @@ -379,7 +379,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch leaves: {err:#}" ); - } + }, } } @@ -414,7 +414,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch accounts: {err:#}" ); - } + }, } } @@ -441,7 +441,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch frontier: {err:#}" ); - } + }, } } @@ -461,7 +461,7 @@ impl StateCatchup for Vec { provider = provider.name(), "failed to fetch chain config: {err:#}" ); - } + }, } } @@ -558,11 +558,11 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { Some(view) => { tracing::info!(?view, "starting from saved view"); view - } + }, None => { tracing::info!("no saved view, starting from genesis"); ViewNumber::genesis() - } + }, }; let next_epoch_high_qc = self @@ -587,7 +587,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { let anchor_view = leaf.view_number(); (leaf, high_qc, Some(anchor_view)) - } + }, None => { tracing::info!("no saved leaf, starting from genesis leaf"); ( @@ -596,7 +596,7 @@ pub trait SequencerPersistence: Sized + Send + Sync + Clone + 'static { QuorumCertificate2::genesis::(&genesis_validated_state, &state).await, None, ) - } + }, }; let validated_state = if leaf.block_header().height() == 0 { // If we are starting from genesis, we can provide the full state. diff --git a/types/src/v0/utils.rs b/types/src/v0/utils.rs index d2c32e598f..0f38afdb5e 100644 --- a/types/src/v0/utils.rs +++ b/types/src/v0/utils.rs @@ -264,14 +264,14 @@ impl BackoffParams { Ok(res) => return Ok(res), Err(err) if self.disable => { return Err(err.context("Retryable operation failed; retries disabled")); - } + }, Err(err) => { tracing::warn!( "Retryable operation failed, will retry after {delay:?}: {err:#}" ); sleep(delay).await; delay = self.backoff(delay); - } + }, } } unreachable!() diff --git a/utils/src/lib.rs b/utils/src/lib.rs index 014cecec6d..b76ecf50d5 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -257,14 +257,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error connecting to RPC {}: {}", provider, err); return None; - } + }, }; let chain_id = match provider.get_chainid().await { Ok(id) => id.as_u64(), Err(err) => { tracing::error!("error getting chain ID: {}", err); return None; - } + }, }; let mnemonic = match MnemonicBuilder::::default() .phrase(mnemonic) @@ -274,14 +274,14 @@ pub async fn init_signer(provider: &Url, mnemonic: &str, index: u32) -> Option { tracing::error!("error building wallet: {}", err); return None; - } + }, }; let wallet = match mnemonic.build() { Ok(wallet) => wallet, Err(err) => { tracing::error!("error opening wallet: {}", err); return None; - } + }, }; let wallet = wallet.with_chain_id(chain_id); Some(SignerMiddleware::new(provider, wallet)) @@ -367,7 +367,7 @@ where tracing::error!("contract revert: {:?}", e); } return Err(anyhow!("error sending transaction: {:?}", err)); - } + }, }; let hash = pending.tx_hash(); @@ -382,12 +382,12 @@ where Ok(Some(receipt)) => receipt, Ok(None) => { return Err(anyhow!("contract call {hash:x}: no receipt")); - } + }, Err(err) => { return Err(anyhow!( "contract call {hash:x}: error getting 
transaction receipt: {err}" )) - } + }, }; if receipt.status != Some(1.into()) { return Err(anyhow!("contract call {hash:x}: transaction reverted")); @@ -418,19 +418,19 @@ async fn wait_for_transaction_to_be_mined( if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): error getting transaction status: {err}"); } - } + }, Ok(None) => { if i >= log_retries { tracing::warn!( "contract call {hash:?} (retry {i}/{retries}): missing from mempool" ); } - } + }, Ok(Some(tx)) if tx.block_number.is_none() => { if i >= log_retries { tracing::warn!("contract call {hash:?} (retry {i}/{retries}): pending"); } - } + }, Ok(Some(_)) => return true, } From 2776d2a9b8fb924dde43f98b72ded93ee440f41b Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Mar 2025 10:55:50 +0500 Subject: [PATCH 113/120] revert update.rs --- .../src/data_source/update.rs | 62 ++++++++++++++----- sequencer/src/run.rs | 6 +- types/src/v0/impls/instance_state.rs | 5 +- 3 files changed, 52 insertions(+), 21 deletions(-) diff --git a/hotshot-query-service/src/data_source/update.rs b/hotshot-query-service/src/data_source/update.rs index 7045937beb..c3b832846f 100644 --- a/hotshot-query-service/src/data_source/update.rs +++ b/hotshot-query-service/src/data_source/update.rs @@ -17,16 +17,18 @@ use anyhow::{ensure, Context}; use async_trait::async_trait; use futures::future::Future; use hotshot::types::{Event, EventType}; -use hotshot_types::data::{VidDisperseShare, VidShare}; use hotshot_types::{ - data::Leaf2, + data::{ns_table::parse_ns_table, Leaf2, VidCommitment, VidDisperseShare, VidShare}, + event::LeafInfo, traits::{ block_contents::{BlockHeader, BlockPayload, EncodeBytes, GENESIS_VID_NUM_STORAGE_NODES}, node_implementation::{ConsensusTime, NodeType}, }, - vid::advz::advz_scheme, + vid::{ + advz::advz_scheme, + avidm::{init_avidm_param, AvidMScheme}, + }, }; -use hotshot_types::{data::VidCommitment, event::LeafInfo}; use jf_vid::VidScheme; use crate::{ @@ -170,19 +172,45 @@ fn genesis_vid( ) -> anyhow::Result<(VidCommonQueryData, VidShare)> { let payload = Payload::::empty().0; let bytes = payload.encode(); - let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) - .disperse(bytes) - .context("unable to compute VID dispersal for genesis block")?; - ensure!( - VidCommitment::V0(disperse.commit) == leaf.block_header().payload_commitment(), - "computed VID commit {} for genesis block does not match header commit {}", - disperse.commit, - leaf.block_header().payload_commitment() - ); - Ok(( - VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), - VidShare::V0(disperse.shares.remove(0)), - )) + + match leaf.block_header().payload_commitment() { + VidCommitment::V0(commit) => { + let mut disperse = advz_scheme(GENESIS_VID_NUM_STORAGE_NODES) + .disperse(bytes) + .context("unable to compute VID dispersal for genesis block")?; + + ensure!( + disperse.commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + disperse.commit, + commit + ); + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), Some(disperse.common)), + VidShare::V0(disperse.shares.remove(0)), + )) + }, + VidCommitment::V1(commit) => { + let avidm_param = init_avidm_param(GENESIS_VID_NUM_STORAGE_NODES)?; + let weights = vec![1; GENESIS_VID_NUM_STORAGE_NODES]; + let ns_table = parse_ns_table(bytes.len(), &leaf.block_header().metadata().encode()); + + let (calculated_commit, mut shares) = + AvidMScheme::ns_disperse(&avidm_param, &weights, &bytes, ns_table).unwrap(); + + 
ensure!( + calculated_commit == commit, + "computed VID commit {} for genesis block does not match header commit {}", + calculated_commit, + commit + ); + + Ok(( + VidCommonQueryData::new(leaf.block_header().clone(), None), + VidShare::V1(shares.remove(0)), + )) + }, + } } /// A data source with an atomic transaction-based synchronization interface. diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index 8258f25878..f7d1fc8897 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -65,7 +65,7 @@ pub async fn main() -> anyhow::Result<()> { genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await }, @@ -75,7 +75,7 @@ pub async fn main() -> anyhow::Result<()> { genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await }, @@ -85,7 +85,7 @@ pub async fn main() -> anyhow::Result<()> { genesis, modules, opt, - SequencerVersions::::new(), + SequencerVersions::::new(), ) .await }, diff --git a/types/src/v0/impls/instance_state.rs b/types/src/v0/impls/instance_state.rs index f35c80d5c2..4fdfbd67df 100644 --- a/types/src/v0/impls/instance_state.rs +++ b/types/src/v0/impls/instance_state.rs @@ -5,7 +5,10 @@ use vbs::version::Version; #[cfg(any(test, feature = "testing"))] use vbs::version::{StaticVersion, StaticVersionType}; -use super::{state::ValidatedState, traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, L1BlockInfo, L1Client, PubKey, Timestamp, Upgrade, UpgradeMode, UpgradeType}; +use super::{ + state::ValidatedState, traits::StateCatchup, v0_99::ChainConfig, GenesisHeader, L1BlockInfo, + L1Client, PubKey, Timestamp, Upgrade, UpgradeMode, UpgradeType, +}; /// Represents the immutable state of a node. /// From a72e621e4d29ae03ea6ba7542658be735f5ab02a Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Mar 2025 11:01:45 +0500 Subject: [PATCH 114/120] remove todo --- builder/src/bin/permissionless-builder.rs | 1 - sequencer/src/genesis.rs | 3 +-- sequencer/src/run.rs | 5 ++--- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs index 771e5308eb..5a46e96c04 100644 --- a/builder/src/bin/permissionless-builder.rs +++ b/builder/src/bin/permissionless-builder.rs @@ -130,7 +130,6 @@ async fn main() -> anyhow::Result<()> { ) .await } - // TODO change `fee` to `pos` (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { run::>( genesis, opt diff --git a/sequencer/src/genesis.rs b/sequencer/src/genesis.rs index c2945e2dcc..983c199f7f 100644 --- a/sequencer/src/genesis.rs +++ b/sequencer/src/genesis.rs @@ -103,8 +103,7 @@ impl Genesis { // now iterate over each upgrade type and validate the fee contract if it exists for (version, upgrade) in &self.upgrades { let chain_config = &upgrade.upgrade_type.chain_config(); - // Is this not an error case? Isn't a chain config a - // requirement? At least for most versions? 
+ if chain_config.is_none() { continue; } diff --git a/sequencer/src/run.rs b/sequencer/src/run.rs index f7d1fc8897..8d21da6a9b 100644 --- a/sequencer/src/run.rs +++ b/sequencer/src/run.rs @@ -58,7 +58,6 @@ pub async fn main() -> anyhow::Result<()> { ) .await }, - // TODO change `fee` to `pos` #[cfg(all(feature = "fee", feature = "marketplace"))] (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => { run( @@ -70,7 +69,7 @@ pub async fn main() -> anyhow::Result<()> { .await }, #[cfg(feature = "fee")] - (FeeVersion::VERSION, _) => { + (espresso_types::FeeVersion::VERSION, _) => { run( genesis, modules, @@ -80,7 +79,7 @@ pub async fn main() -> anyhow::Result<()> { .await }, #[cfg(feature = "marketplace")] - (MarketplaceVersion::VERSION, _) => { + (espresso_types::MarketplaceVersion::VERSION, _) => { run( genesis, modules, From ef8f1ad846f5fd227e3a56df1c2e1356ffc9d209 Mon Sep 17 00:00:00 2001 From: imabdulbasit Date: Mon, 10 Mar 2025 11:12:53 +0500 Subject: [PATCH 115/120] enable fee,pos,marketplace features for build-test-artifcats-postgres --- .github/workflows/test.yml | 8 +-- flake.lock | 103 +++++++------------------------------ 2 files changed, 22 insertions(+), 89 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cd9c81f7c6..c1a7153c47 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -51,7 +51,7 @@ jobs: comment_on_pr: false - name: Build and archive tests - run: cargo nextest archive --locked --workspace --archive-file nextest-archive-postgres.tar.zst + run: cargo nextest archive --features "fee, pos, marketplace" --locked --workspace --archive-file nextest-archive-postgres.tar.zst - name: Upload archive to workflow uses: actions/upload-artifact@v4 @@ -86,7 +86,7 @@ jobs: comment_on_pr: false - name: Build and archive tests - run: cargo nextest archive --locked --features "embedded-db testing" --workspace --archive-file nextest-archive-sqlite.tar.zst + run: cargo nextest archive --locked --features "fee, pos, marketplace, embedded-db, testing" --workspace --archive-file nextest-archive-sqlite.tar.zst - name: Upload archive to workflow uses: actions/upload-artifact@v4 @@ -110,8 +110,8 @@ jobs: - name: Build Bins run: | - cargo build --features "fee,pos,marketplace" --locked --profile test --bins - cargo build --features "fee,pos,marketplace" --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target + cargo build --features "fee, pos, marketplace" --locked --profile test --bins + cargo build --features "fee, pos, marketplace" --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target timeout-minutes: 60 - name: Upload archive to workflow diff --git a/flake.lock b/flake.lock index 9c6891b0fe..5422bae20f 100644 --- a/flake.lock +++ b/flake.lock @@ -51,21 +51,6 @@ } }, "flake-utils_2": { - "locked": { - "lastModified": 1644229661, - "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_3": { "inputs": { "systems": "systems_2" }, @@ -83,7 +68,7 @@ "type": "github" } }, - "flake-utils_4": { + "flake-utils_3": { "inputs": { "systems": "systems_3" }, @@ -101,26 +86,6 @@ "type": "github" } }, - "foundry-nix": { - "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs" - }, - "locked": { - "lastModified": 1740993113, - "narHash": 
"sha256-XY6CUZft7wjB/cbLyi/xeOZHh2mizSAT0EaYo9wuRXI=", - "owner": "shazow", - "repo": "foundry.nix", - "rev": "ed2a08376f14c0caf2b97418c91a66872e5ab3e2", - "type": "github" - }, - "original": { - "owner": "shazow", - "ref": "monthly", - "repo": "foundry.nix", - "type": "github" - } - }, "gitignore": { "inputs": { "nixpkgs": [ @@ -144,22 +109,24 @@ }, "nixpkgs": { "locked": { - "lastModified": 1666753130, - "narHash": "sha256-Wff1dGPFSneXJLI2c0kkdWTgxnQ416KE6X4KnFkgPYQ=", + "lastModified": 1741246872, + "narHash": "sha256-Q6pMP4a9ed636qilcYX8XUguvKl/0/LGXhHcRI91p0U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f540aeda6f677354f1e7144ab04352f61aaa0118", + "rev": "10069ef4cf863633f57238f179a0297de84bd8d3", "type": "github" }, "original": { - "id": "nixpkgs", - "type": "indirect" + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" } }, "nixpkgs-cross-overlay": { "inputs": { - "flake-utils": "flake-utils_3", - "nixpkgs": "nixpkgs_3", + "flake-utils": "flake-utils_2", + "nixpkgs": "nixpkgs_2", "rust-overlay": "rust-overlay", "treefmt-nix": "treefmt-nix" }, @@ -177,39 +144,7 @@ "type": "github" } }, - "nixpkgs-legacy-foundry": { - "locked": { - "lastModified": 1736798957, - "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3", - "type": "github" - } - }, "nixpkgs_2": { - "locked": { - "lastModified": 1741246872, - "narHash": "sha256-Q6pMP4a9ed636qilcYX8XUguvKl/0/LGXhHcRI91p0U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "10069ef4cf863633f57238f179a0297de84bd8d3", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_3": { "locked": { "lastModified": 1733550349, "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=", @@ -225,7 +160,7 @@ "type": "github" } }, - "nixpkgs_4": { + "nixpkgs_3": { "locked": { "lastModified": 1730768919, "narHash": "sha256-8AKquNnnSaJRXZxc5YmF/WfmxiHX6MMZZasRP6RRQkE=", @@ -241,7 +176,7 @@ "type": "github" } }, - "nixpkgs_5": { + "nixpkgs_4": { "locked": { "lastModified": 1736320768, "narHash": "sha256-nIYdTAiKIGnFNugbomgBJR+Xv5F1ZQU+HfaBqJKroC0=", @@ -257,7 +192,7 @@ "type": "github" } }, - "nixpkgs_6": { + "nixpkgs_5": { "locked": { "lastModified": 1682516527, "narHash": "sha256-1joLG1A4mwhMrj4XVp0mBTNIHphVQSEMIlZ50t0Udxk=", @@ -276,7 +211,7 @@ "inputs": { "flake-compat": "flake-compat_2", "gitignore": "gitignore", - "nixpkgs": "nixpkgs_4" + "nixpkgs": "nixpkgs_3" }, "locked": { "lastModified": 1735882644, @@ -296,10 +231,8 @@ "inputs": { "flake-compat": "flake-compat", "flake-utils": "flake-utils", - "foundry-nix": "foundry-nix", - "nixpkgs": "nixpkgs_2", + "nixpkgs": "nixpkgs", "nixpkgs-cross-overlay": "nixpkgs-cross-overlay", - "nixpkgs-legacy-foundry": "nixpkgs-legacy-foundry", "pre-commit-hooks": "pre-commit-hooks", "rust-overlay": "rust-overlay_2", "solc-bin": "solc-bin" @@ -328,7 +261,7 @@ }, "rust-overlay_2": { "inputs": { - "nixpkgs": "nixpkgs_5" + "nixpkgs": "nixpkgs_4" }, "locked": { "lastModified": 1740104932, @@ -346,8 +279,8 @@ }, "solc-bin": { "inputs": { - "flake-utils": "flake-utils_4", - "nixpkgs": "nixpkgs_6" + "flake-utils": "flake-utils_3", + "nixpkgs": "nixpkgs_5" }, "locked": { "lastModified": 1733347147, From 6a7185763548d521b8062bf71a35c68ea7938af6 Mon Sep 17 
00:00:00 2001
From: imabdulbasit
Date: Mon, 10 Mar 2025 11:18:31 +0500
Subject: [PATCH 116/120] fix test-integration workflow

---
 .github/workflows/test.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index c1a7153c47..54229483b2 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -209,7 +209,7 @@ jobs:
     needs: [build-test-bins, build-test-artifacts-postgres]
     strategy:
       matrix:
-        version: [02, 03, 99]
+        version: [02,03,99]
         include:
           - version: 02
             compose: "-f process-compose.yaml"

From 4c816b99df80852806c6edcc3fa0e63c6610be57 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 10 Mar 2025 11:55:07 +0500
Subject: [PATCH 117/120] add features to permissionless builder

---
 builder/Cargo.toml                        |  6 ++++++
 builder/src/bin/permissionless-builder.rs | 20 +++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/builder/Cargo.toml b/builder/Cargo.toml
index 7890ac2338..b378e0f688 100644
--- a/builder/Cargo.toml
+++ b/builder/Cargo.toml
@@ -5,6 +5,12 @@ version = { workspace = true }
 authors = { workspace = true }
 edition = { workspace = true }
 
+[features]
+default = ["pos"]
+fee = []
+pos = []
+marketplace = []
+
 [dependencies]
 anyhow = { workspace = true }
 async-broadcast = { workspace = true }
diff --git a/builder/src/bin/permissionless-builder.rs b/builder/src/bin/permissionless-builder.rs
index 5a46e96c04..fa5e9f734e 100644
--- a/builder/src/bin/permissionless-builder.rs
+++ b/builder/src/bin/permissionless-builder.rs
@@ -117,12 +117,14 @@ async fn main() -> anyhow::Result<()> {
     let upgrade = genesis.upgrade_version;
 
     match (base, upgrade) {
+        #[cfg(all(feature = "fee", feature = "pos"))]
         (espresso_types::FeeVersion::VERSION, espresso_types::EpochVersion::VERSION) => {
             run::>(
                 genesis, opt
             )
             .await
         }
+        #[cfg(feature = "pos")]
         (espresso_types::EpochVersion::VERSION, _) => {
             run::>(
                 genesis, opt
             )
             .await
         }
+        #[cfg(all(feature = "fee", feature = "marketplace"))]
         (espresso_types::FeeVersion::VERSION, espresso_types::MarketplaceVersion::VERSION) => {
             run::>(
                 genesis, opt
             )
             .await
-        }
+        },
+        #[cfg(feature = "fee")]
+        (espresso_types::FeeVersion::VERSION, _) => {
+            run::>(
+                genesis, opt
+            )
+            .await
+        },
+        #[cfg(feature = "marketplace")]
+        (espresso_types::MarketplaceVersion::VERSION, _) => {
+            run::>(
+                genesis, opt
+            )
+            .await
+
+        },
         _ => panic!(
             "Invalid base ({base}) and upgrade ({upgrade}) versions specified in the toml file."
         ),
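Paired with the run.rs changes in PATCH 114, this commit turns version support into a compile-time choice: each (base, upgrade) arm exists only when its Cargo feature is enabled, and an unsupported pairing fails fast at startup. A self-contained sketch of the dispatch pattern — the version constants and `launch` stub are hypothetical stand-ins, since the diff elides the real generic arguments to `run`:

// Editorial sketch of cfg-gated version dispatch. The constants and `launch`
// are hypothetical stand-ins for the espresso_types versions and the real
// `run` call; only the #[cfg]-on-match-arm pattern comes from the patch.
const FEE_VERSION: u16 = 2;
const EPOCH_VERSION: u16 = 3;
const MARKETPLACE_VERSION: u16 = 99;

fn launch(mode: &str) {
    println!("starting with {mode} versions");
}

fn dispatch(base: u16, upgrade: u16) {
    match (base, upgrade) {
        // Each arm is compiled out entirely unless its feature is enabled.
        #[cfg(all(feature = "fee", feature = "pos"))]
        (FEE_VERSION, EPOCH_VERSION) => launch("fee base + pos upgrade"),
        #[cfg(feature = "pos")]
        (EPOCH_VERSION, _) => launch("pos base"),
        #[cfg(feature = "marketplace")]
        (MARKETPLACE_VERSION, _) => launch("marketplace base"),
        // The real binaries panic on an unsupported pair; this sketch logs
        // instead so it runs cleanly even with no features enabled.
        _ => eprintln!("no compiled-in support for base {base}, upgrade {upgrade}"),
    }
}

fn main() {
    dispatch(FEE_VERSION, EPOCH_VERSION);
}

Compiling out unsupported arms keeps the binaries small and turns an accidental version mismatch into an immediate startup error rather than a latent runtime bug.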
From c077ba9edd16bcee339c58139bc784e3861ef54e Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 10 Mar 2025 11:59:20 +0500
Subject: [PATCH 118/120] build docker binaries with all version features

---
 .github/workflows/build.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 60e36265d7..8ee74023e9 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -45,10 +45,10 @@ jobs:
       - name: Build
         # Build in release without `testing` feature, this should work without `hotshot_example` config.
         run: |
-          cargo build --locked --release --workspace
+          cargo build --features "fee, pos, marketplace" --locked --release --workspace
 
       - name: Build sequencer-sqlite
-        run: cargo build --locked --release --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target
+        run: cargo build --features "fee, pos, marketplace" --locked --release --manifest-path ./sequencer-sqlite/Cargo.toml --target-dir ./target
 
       - name: Build Espresso Dev Node
         # Espresso Dev Node currently requires testing feature, so it is built separately.

From e807c9984620ccd8c6c3f4dc01079a9d168c4ec5 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 10 Mar 2025 12:22:58 +0500
Subject: [PATCH 119/120] remove hardcoded version

---
 client/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/client/src/lib.rs b/client/src/lib.rs
index 277ee535c8..34d84c1d6f 100644
--- a/client/src/lib.rs
+++ b/client/src/lib.rs
@@ -55,7 +55,7 @@ impl SequencerClient {
         height: u64,
     ) -> anyhow::Result>> {
         self.0
-            .socket(&format!("v0/availability/stream/headers/{height}"))
+            .socket(&format!("availability/stream/headers/{height}"))
             .subscribe::<Header>()
             .await
             .context("subscribing to Espresso headers")
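With the hardcoded `v0/` prefix removed, the client requests `availability/stream/headers/{height}` and leaves version routing to the query service, presumably so it can follow the server's newest supported API major version. A hedged usage sketch of consuming that stream — `SequencerClient`, `subscribe_headers`, and `Header` below are local stubs standing in for the real client types, which this diff shows only in part:

// Editorial sketch; the stubs below stand in for the real client crate types.
use futures::{stream, Stream, StreamExt};

#[derive(Debug)]
struct Header {
    height: u64,
}

struct SequencerClient;

impl SequencerClient {
    // The real method opens a socket on "availability/stream/headers/{height}"
    // (no version prefix after this patch) and subscribes to Header messages.
    async fn subscribe_headers(
        &self,
        height: u64,
    ) -> anyhow::Result<impl Stream<Item = anyhow::Result<Header>> + Unpin> {
        Ok(stream::iter((height..height + 3).map(|h| Ok(Header { height: h }))))
    }
}

fn main() -> anyhow::Result<()> {
    futures::executor::block_on(async {
        let client = SequencerClient;
        let mut headers = client.subscribe_headers(10).await?;
        while let Some(header) = headers.next().await {
            println!("got header at height {}", header?.height);
        }
        Ok(())
    })
}

Nothing changes for a server still serving v0; the client simply stops pinning itself to one major version of the availability API.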
From 6e73cd890d51d48fc772ccfeaa300a9d54408c73 Mon Sep 17 00:00:00 2001
From: imabdulbasit
Date: Mon, 10 Mar 2025 17:33:01 +0500
Subject: [PATCH 120/120] pass genesis file env

---
 .github/workflows/test.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 54229483b2..787aed326c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -212,12 +212,15 @@ jobs:
         version: [02,03,99]
         include:
           - version: 02
+            genesis: "data/genesis/demo.toml"
             compose: "-f process-compose.yaml"
           - version: 03
+            genesis: "data/genesis/demo-pos.toml"
             compose: "-f process-compose.yaml"
           - version: 99
+            genesis: "data/genesis/demo-marketplace.toml"
             compose: "-f process-compose.yaml -f process-compose-mp.yml"
         fail-fast: false
     runs-on: ubuntu-latest
@@ -246,7 +249,7 @@ jobs:
       - name: Run Demo-Native ${{matrix.version}}
-        run: bash -x scripts/demo-native ${{matrix.compose}} --tui=false > ${{ env.PC_LOGS }} 2>&1 &
+        run: ESPRESSO_SEQUENCER_GENESIS_FILE=${{matrix.genesis}} bash -x scripts/demo-native ${{matrix.compose}} --tui=false > ${{ env.PC_LOGS }} 2>&1 &
 
       - name: Test Integration
         env: