diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 1086e9b..b18d5e8 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -17,7 +17,7 @@ use crate::{ tx::TxResult, }, header::Header, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, @@ -203,7 +203,7 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + fn validate_pruning_proof(&self, proof: &PruningPointProof, proof_metadata: &PruningProofMetadata) -> PruningImportResult<()> { unimplemented!() } diff --git a/consensus/core/src/errors/pruning.rs b/consensus/core/src/errors/pruning.rs index 966d41d..0211f2b 100644 --- a/consensus/core/src/errors/pruning.rs +++ b/consensus/core/src/errors/pruning.rs @@ -59,6 +59,9 @@ pub enum PruningImportError { #[error("process exit was initiated while validating pruning point proof")] PruningValidationInterrupted, + + #[error("block {0} at level {1} has invalid proof of work for level")] + ProofOfWorkFailed(Hash, BlockLevel), } pub type PruningImportResult = std::result::Result; diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index d235ca7..b425156 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -41,6 +41,10 @@ pub mod utxo; /// overall blocks, so 2^192 is definitely a justified upper-bound. 
pub type BlueWorkType = spectre_math::Uint192; +/// This extends directly from the expectation above about having no more than +2^128 work in a single block +pub const MAX_WORK_LEVEL: BlockLevel = 128; + /// The type used to represent the GHOSTDAG K parameter pub type KType = u16; diff --git a/consensus/core/src/pruning.rs b/consensus/core/src/pruning.rs index 754fec9..d50beaf 100644 --- a/consensus/core/src/pruning.rs +++ b/consensus/core/src/pruning.rs @@ -1,6 +1,7 @@ use crate::{ header::Header, trusted::{TrustedGhostdagData, TrustedHeader}, + BlueWorkType, }; use spectre_hashes::Hash; use std::sync::Arc; @@ -19,3 +20,15 @@ pub struct PruningPointTrustedData { /// Union of GHOSTDAG data required to verify blocks in the future of the pruning point pub ghostdag_blocks: Vec, } + +#[derive(Clone, Copy)] +pub struct PruningProofMetadata { + /// The claimed work of the initial relay block (from the prover) + pub relay_block_blue_work: BlueWorkType, +} + +impl PruningProofMetadata { + pub fn new(relay_block_blue_work: BlueWorkType) -> Self { + Self { relay_block_blue_work } + } +} diff --git a/consensus/pow/src/lib.rs b/consensus/pow/src/lib.rs index f22f0de..065b206 100644 --- a/consensus/pow/src/lib.rs +++ b/consensus/pow/src/lib.rs @@ -56,12 +56,22 @@ impl State { } pub fn calc_block_level(header: &Header, max_block_level: BlockLevel) -> BlockLevel { + let (block_level, _) = calc_block_level_check_pow(header, max_block_level); + block_level +} + +pub fn calc_block_level_check_pow(header: &Header, max_block_level: BlockLevel) -> (BlockLevel, bool) { if header.parents_by_level.is_empty() { - return max_block_level; // Genesis has the max block level + return (max_block_level, true); // Genesis has the max block level } let state = State::new(header); - let (_, pow) = state.check_pow(header.nonce); + let (passed, pow) = state.check_pow(header.nonce); + let block_level = calc_level_from_pow(pow, max_block_level); + (block_level, passed) +} + +pub fn 
calc_level_from_pow(pow: Uint256, max_block_level: BlockLevel) -> BlockLevel { let signed_block_level = max_block_level as i64 - pow.bits() as i64; max(signed_block_level, 0) as BlockLevel } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index f3f28f0..fb5d618 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -64,7 +64,7 @@ use spectre_consensus_core::{ merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, - pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, + pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, @@ -758,8 +758,12 @@ impl ConsensusApi for Consensus { calc_hash_merkle_root(txs.iter(), storage_mass_activated) } - fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> { - self.services.pruning_proof_manager.validate_pruning_point_proof(proof) + fn validate_pruning_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> Result<(), PruningImportError> { + self.services.pruning_proof_manager.validate_pruning_point_proof(proof, proof_metadata) } fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> { diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index b0b6d1a..394af55 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -118,7 +118,6 @@ impl ConsensusServices { relations_services[0].clone(), storage.headers_store.clone(), reachability_service.clone(), - false, ); let coinbase_manager = CoinbaseManager::new( diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs 
b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index e07a303..40e6076 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -9,7 +9,7 @@ use spectre_consensus_core::header::Header; use spectre_consensus_core::BlockLevel; use spectre_core::time::unix_now; use spectre_database::prelude::StoreResultExtensions; -use std::cmp::max; +use spectre_pow::calc_level_from_pow; impl HeaderProcessor { /// Validates the header in isolation including pow check against header declared bits. @@ -102,8 +102,7 @@ impl HeaderProcessor { let state = spectre_pow::State::new(header); let (passed, pow) = state.check_pow(header.nonce); if passed || self.skip_proof_of_work { - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - Ok(max(signed_block_level, 0) as BlockLevel) + Ok(calc_level_from_pow(pow, self.max_block_level)) } else { Err(RuleError::InvalidPoW) } diff --git a/consensus/src/processes/difficulty.rs b/consensus/src/processes/difficulty.rs index 4253e67..a448d9c 100644 --- a/consensus/src/processes/difficulty.rs +++ b/consensus/src/processes/difficulty.rs @@ -6,7 +6,7 @@ use crate::model::stores::{ use spectre_consensus_core::{ config::params::MIN_DIFFICULTY_WINDOW_LEN, errors::difficulty::{DifficultyError, DifficultyResult}, - BlockHashSet, BlueWorkType, + BlockHashSet, BlueWorkType, MAX_WORK_LEVEL, }; use spectre_math::{Uint256, Uint320}; use std::{ @@ -282,6 +282,16 @@ pub fn calc_work(bits: u32) -> BlueWorkType { res.try_into().expect("Work should not exceed 2**192") } +pub fn level_work(level: u8, max_block_level: u8) -> BlueWorkType { + // Need to make a special condition for level 0 to ensure true work is always used + if level == 0 { + return 0.into(); + } + // We use 256 here so the result corresponds to the work at the level from calc_level_from_pow + let exp = (level as u32) + 256 - (max_block_level as u32); + 
BlueWorkType::from_u64(1) << exp.min(MAX_WORK_LEVEL as u32) +} + #[derive(Eq)] struct DifficultyBlock { timestamp: u64, @@ -307,3 +317,55 @@ impl Ord for DifficultyBlock { self.timestamp.cmp(&other.timestamp).then_with(|| self.sortable_block.cmp(&other.sortable_block)) } } + +#[cfg(test)] +mod tests { + use spectre_consensus_core::{BlockLevel, BlueWorkType, MAX_WORK_LEVEL}; + use spectre_math::{Uint256, Uint320}; + use spectre_pow::calc_level_from_pow; + + use crate::processes::difficulty::{calc_work, level_work}; + use spectre_utils::hex::ToHex; + + #[test] + fn test_target_levels() { + let max_block_level: BlockLevel = 225; + for level in 1..=max_block_level { + // required pow for level + let level_target = (Uint320::from_u64(1) << (max_block_level - level).max(MAX_WORK_LEVEL) as u32) - Uint320::from_u64(1); + let level_target = Uint256::from_be_bytes(level_target.to_be_bytes()[8..40].try_into().unwrap()); + let calculated_level = calc_level_from_pow(level_target, max_block_level); + + let true_level_work = calc_work(level_target.compact_target_bits()); + let calc_level_work = level_work(level, max_block_level); + + // A "good enough" estimate of level work is within 1% diff from work with actual level target + // It's hard to calculate percentages with these large numbers, so to get around using floats + // we multiply the difference by 100. 
If the result is <= the calc_level_work it means + // the difference must have been less than 1% + let (percent_diff, overflowed) = (true_level_work - calc_level_work).overflowing_mul(BlueWorkType::from_u64(100)); + let is_good_enough = percent_diff <= calc_level_work; + + println!("Level {}:", level); + println!( + " data | {} | {} | {} / {} |", + level_target.compact_target_bits(), + level_target.bits(), + calculated_level, + max_block_level + ); + println!(" pow | {}", level_target.to_hex()); + println!(" work | 0000000000000000{}", true_level_work.to_hex()); + println!(" lvwork | 0000000000000000{}", calc_level_work.to_hex()); + println!(" diff<1% | {}", !overflowed && (is_good_enough)); + + assert!(is_good_enough); + } + } + + #[test] + fn test_base_level_work() { + // Expect that at level 0, the level work is always 0 + assert_eq!(BlueWorkType::from(0), level_work(0, 255)); + } +} diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 6272660..c657ac5 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use spectre_consensus_core::{ blockhash::{self, BlockHashExtensions, BlockHashes}, - BlockHashMap, BlueWorkType, HashMapCustomHasher, + BlockHashMap, BlockLevel, BlueWorkType, HashMapCustomHasher, }; use spectre_hashes::Hash; use spectre_utils::refs::Refs; @@ -16,7 +16,7 @@ use crate::{ relations::RelationsStoreReader, }, }, - processes::difficulty::calc_work, + processes::difficulty::{calc_work, level_work}, }; use super::ordering::*; @@ -29,7 +29,15 @@ pub struct GhostdagManager, pub(super) reachability_service: U, - use_score_as_work: bool, + + /// Level work is a lower-bound for the amount of work represented by each block. 
+ /// When running GD for higher-level sub-DAGs, this value should be set accordingly + /// to the work represented by that level, and then used as a lower bound + /// for the work calculated from header bits (which depends on current difficulty). + /// For instance, assuming level 80 (i.e., pow hash has at least 80 zeros) is always + /// above the difficulty target, all blocks in it should represent the same amount of + /// work regardless of whether current difficulty requires 20 zeros or 25 zeros. + level_work: BlueWorkType, } impl GhostdagManager { @@ -40,9 +48,30 @@ impl, reachability_service: U, - use_score_as_work: bool, ) -> Self { - Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, use_score_as_work } + // For ordinary GD, always keep level_work=0 so the lower bound is ineffective + Self { genesis_hash, k, ghostdag_store, relations_store, reachability_service, headers_store, level_work: 0.into() } + } + + pub fn with_level( + genesis_hash: Hash, + k: KType, + ghostdag_store: Arc, + relations_store: S, + headers_store: Arc, + reachability_service: U, + level: BlockLevel, + max_block_level: BlockLevel, + ) -> Self { + Self { + genesis_hash, + k, + ghostdag_store, + relations_store, + reachability_service, + headers_store, + level_work: level_work(level, max_block_level), + } } pub fn genesis_ghostdag_data(&self) -> GhostdagData { @@ -115,20 +144,21 @@ impl = Default::default(); let mut visited = BlockHashSet::new(); for child in relations_service.get_children(root).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); + topological_heap + .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work })); } let mut has_required_block 
= required_block.is_some_and(|required_block| root == required_block); @@ -378,11 +376,8 @@ impl PruningProofManager { ghostdag_store.insert(current_hash, Arc::new(current_gd)).unwrap_or_exists(); for child in relations_service.get_children(current_hash).unwrap().read().iter().copied() { - topological_heap.push(Reverse(SortableBlock { - hash: child, - // It's important to use here blue work and not score so we can iterate the heap in a way that respects the topology - blue_work: self.headers_store.get_header(child).unwrap().blue_work, - })); + topological_heap + .push(Reverse(SortableBlock { hash: child, blue_work: self.headers_store.get_header(child).unwrap().blue_work })); } } diff --git a/consensus/src/processes/pruning_proof/validate.rs b/consensus/src/processes/pruning_proof/validate.rs index 0cfa958..d779bf2 100644 --- a/consensus/src/processes/pruning_proof/validate.rs +++ b/consensus/src/processes/pruning_proof/validate.rs @@ -4,21 +4,20 @@ use std::{ }; use itertools::Itertools; +use parking_lot::lock_api::RwLock; +use rocksdb::WriteBatch; use spectre_consensus_core::{ blockhash::{BlockHashExtensions, BlockHashes, ORIGIN}, errors::pruning::{PruningImportError, PruningImportResult}, header::Header, - pruning::PruningPointProof, + pruning::{PruningPointProof, PruningProofMetadata}, BlockLevel, }; use spectre_core::info; use spectre_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions}; use spectre_hashes::Hash; -use spectre_math::int::SignedInteger; -use spectre_pow::calc_block_level; +use spectre_pow::{calc_block_level, calc_block_level_check_pow}; use spectre_utils::vec::VecExtensions; -use parking_lot::lock_api::RwLock; -use rocksdb::WriteBatch; use crate::{ model::{ @@ -26,6 +25,7 @@ use crate::{ stores::{ ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagStore, GhostdagStoreReader}, headers::{DbHeadersStore, HeaderStore, HeaderStoreReader}, + headers_selected_tip::HeadersSelectedTipStoreReader, 
pruning::PruningStoreReader, reachability::{DbReachabilityStore, ReachabilityStoreReader}, relations::{DbRelationsStore, RelationsStoreReader}, @@ -37,7 +37,11 @@ use crate::{ use super::{PruningProofManager, TempProofContext}; impl PruningProofManager { - pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> { + pub fn validate_pruning_point_proof( + &self, + proof: &PruningPointProof, + proof_metadata: &PruningProofMetadata, + ) -> PruningImportResult<()> { if proof.len() != self.max_block_level as usize + 1 { return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1)); } @@ -74,6 +78,13 @@ impl PruningProofManager { let current_pp = pruning_read.get().unwrap().pruning_point; let current_pp_header = self.headers_store.get_header(current_pp).unwrap(); + // The accumulated blue work of current consensus from the pruning point onward + let pruning_period_work = + self.headers_selected_tip_store.read().get().unwrap().blue_work.saturating_sub(current_pp_header.blue_work); + // The claimed blue work of the prover from his pruning point and up to the triggering relay block. This work + // will eventually be verified if the proof is accepted so we can treat it as trusted + let prover_claimed_pruning_period_work = proof_metadata.relay_block_blue_work.saturating_sub(proof_pp_header.blue_work); + for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() { let level = level_idx as BlockLevel; self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?; @@ -90,7 +101,6 @@ impl PruningProofManager { // we can determine if the proof is better. The proof is better if the blue work* difference between the // old current consensus's tips and the common ancestor is less than the blue work difference between the // proof's tip and the common ancestor. 
- // *Note: blue work is the same as blue score on levels higher than 0 if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data( &proof_ghostdag_stores, ¤t_consensus_ghostdag_stores, @@ -98,13 +108,13 @@ impl PruningProofManager { level, proof_selected_tip_gd, ) { - let selected_tip_blue_work_diff = - SignedInteger::from(proof_selected_tip_gd.blue_work) - SignedInteger::from(proof_common_ancestor_gd.blue_work); + let proof_level_blue_work_diff = proof_selected_tip_gd.blue_work.saturating_sub(proof_common_ancestor_gd.blue_work); for parent in self.parents_manager.parents_at_level(¤t_pp_header, level).iter().copied() { let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap(); - let parent_blue_work_diff = - SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work); - if parent_blue_work_diff >= selected_tip_blue_work_diff { + let parent_blue_work_diff = parent_blue_work.saturating_sub(common_ancestor_gd.blue_work); + if parent_blue_work_diff.saturating_add(pruning_period_work) + >= proof_level_blue_work_diff.saturating_add(prover_claimed_pruning_period_work) + { return Err(PruningImportError::PruningProofInsufficientBlueWork); } } @@ -187,14 +197,15 @@ impl PruningProofManager { .cloned() .enumerate() .map(|(level, ghostdag_store)| { - GhostdagManager::new( + GhostdagManager::with_level( self.genesis_hash, self.ghostdag_k, ghostdag_store, relations_stores[level].clone(), headers_store.clone(), reachability_services[level].clone(), - level != 0, + level as BlockLevel, + self.max_block_level, ) }) .collect_vec(); @@ -242,10 +253,13 @@ impl PruningProofManager { let level_idx = level as usize; let mut selected_tip = None; for (i, header) in proof[level as usize].iter().enumerate() { - let header_level = calc_block_level(header, self.max_block_level); + let (header_level, pow_passes) = calc_block_level_check_pow(header, 
self.max_block_level); if header_level < level { return Err(PruningImportError::PruningProofWrongBlockLevel(header.hash, header_level, level)); } + if !pow_passes { + return Err(PruningImportError::ProofOfWorkFailed(header.hash, level)); + } headers_store.insert(header.hash, header.clone(), header_level).unwrap_or_exists(); diff --git a/math/src/uint.rs b/math/src/uint.rs index 379fc18..02f44b2 100644 --- a/math/src/uint.rs +++ b/math/src/uint.rs @@ -158,6 +158,18 @@ macro_rules! construct_uint { (self, carry) } + #[inline] + pub fn saturating_sub(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_sub(other); + if carry { Self::ZERO } else { sum } + } + + #[inline] + pub fn saturating_add(self, other: Self) -> Self { + let (sum, carry) = self.overflowing_add(other); + if carry { Self::MAX } else { sum } + } + /// Multiplication by u64 #[inline] pub fn overflowing_mul_u64(self, other: u64) -> (Self, bool) { @@ -1150,6 +1162,19 @@ mod tests { } } + #[test] + fn test_saturating_ops() { + let u1 = Uint128::from_u128(u128::MAX); + let u2 = Uint128::from_u64(u64::MAX); + // Sub + assert_eq!(u1.saturating_sub(u2), Uint128::from_u128(u128::MAX - u64::MAX as u128)); + assert_eq!(u1.saturating_sub(u2).as_u128(), u128::MAX - u64::MAX as u128); + assert_eq!(u2.saturating_sub(u1), Uint128::ZERO); + // Add + assert_eq!(u1.saturating_add(Uint128::from_u64(1)), Uint128::MAX); + assert_eq!(u2.saturating_add(Uint128::from_u64(1)), Uint128::from_u128(u64::MAX as u128 + 1)); + } + #[test] fn test_mod_inv() { use core::cmp::Ordering; diff --git a/protocol/flows/src/v5/ibd/flow.rs b/protocol/flows/src/v5/ibd/flow.rs index 1146435..727d1a2 100644 --- a/protocol/flows/src/v5/ibd/flow.rs +++ b/protocol/flows/src/v5/ibd/flow.rs @@ -10,7 +10,7 @@ use spectre_consensus_core::{ api::BlockValidationFuture, block::Block, header::Header, - pruning::{PruningPointProof, PruningPointsList}, + pruning::{PruningPointProof, PruningPointsList, PruningProofMetadata}, BlockHashSet, }; use 
spectre_consensusmanager::{spawn_blocking, ConsensusProxy, StagingConsensus}; @@ -218,7 +218,7 @@ impl IbdFlow { let staging_session = staging.session().await; - let pruning_point = self.sync_and_validate_pruning_proof(&staging_session).await?; + let pruning_point = self.sync_and_validate_pruning_proof(&staging_session, relay_block).await?; self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?; staging_session.async_validate_pruning_points().await?; self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?; @@ -226,7 +226,7 @@ impl IbdFlow { Ok(()) } - async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy) -> Result { + async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy, relay_block: &Block) -> Result { self.router.enqueue(make_message!(Payload::RequestPruningPointProof, RequestPruningPointProofMessage {})).await?; // Pruning proof generation and communication might take several minutes, so we allow a long 10 minute timeout @@ -234,11 +234,14 @@ impl IbdFlow { let proof: PruningPointProof = msg.try_into()?; debug!("received proof with overall {} headers", proof.iter().map(|l| l.len()).sum::()); + let proof_metadata = PruningProofMetadata::new(relay_block.header.blue_work); + // Get a new session for current consensus (non staging) let consensus = self.ctx.consensus().session().await; // The proof is validated in the context of current consensus - let proof = consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof).map(|()| proof)).await?; + let proof = + consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof, &proof_metadata).map(|()| proof)).await?; let proof_pruning_point = proof[0].last().expect("was just ensured by validation").hash; @@ -316,7 +319,7 @@ impl IbdFlow { if mismatch_detected { info!("Validating the locally built proof (sanity test fallback #2)"); // Note: the proof is 
validated in the context of *current* consensus - if let Err(err) = con.validate_pruning_proof(&built_proof) { + if let Err(err) = con.validate_pruning_proof(&built_proof, &proof_metadata) { panic!("Locally built proof failed validation: {}", err); } info!("Locally built proof was validated successfully");