From a06efabb6e14c4a5e2d35d28284e743cce17afbc Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Tue, 11 Jul 2023 13:56:09 +0200 Subject: [PATCH] Asynchronous-backing compatible Aura, not plugged in (#2573) * rough draft of potential parent search * get things compiling * fmt * add new function to all RelayChainInterface implementations * fix compilation * set slot and timestamp based on relay parent, prepare for find-parent * skeleton of new aura logic * fmt * introduce a collator module in the Aura crate * extract different implementations into own modules * make interface more convenient * docs and todos for lookahead * refactor basic collator to use new collator utility * some more refactoring * finish most of the control flow for new aura * introduce backend as parameter * fix compilation * fix a couple more TODOs * add an `announce_block` function to collator service * announce with barrier * rename block announcement validator to be more specific * fmt * clean up unused import errors * update references to BlockAnnounceValidator * rename unstable_reimpl * add AuraUnincludedSegmentApi * finish rename * integrate AuraUnincludedSegmentApi * add a new block announcement validator for backwards compatibility * add some naive equivocation defenses * rustfmt * clean up remaining TODO [now]s * fmt * try to fix inprocess-interface * actually fix compilation * ignored -> rejected rephrase * fix test compilation * fmt * clippy --- Cargo.lock | 19 + Cargo.toml | 1 + client/collator/src/service.rs | 21 +- client/consensus/aura/Cargo.toml | 3 + client/consensus/aura/src/collator.rs | 363 ++++++++++++ client/consensus/aura/src/collators/basic.rs | 200 +++++++ .../consensus/aura/src/collators/lookahead.rs | 346 ++++++++++++ client/consensus/aura/src/collators/mod.rs | 24 + .../aura/src/equivocation_import_queue.rs | 254 +++++++++ client/consensus/aura/src/lib.rs | 4 +- client/consensus/aura/src/unstable_reimpl.rs | 529 ------------------ 
client/consensus/common/Cargo.toml | 3 + client/consensus/common/src/lib.rs | 203 ++++++- client/consensus/common/src/tests.rs | 2 +- client/network/src/lib.rs | 69 ++- client/network/src/tests.rs | 8 +- client/relay-chain-interface/src/lib.rs | 2 +- client/service/src/lib.rs | 5 +- primitives/aura/Cargo.toml | 30 + primitives/aura/src/lib.rs | 50 ++ 20 files changed, 1588 insertions(+), 548 deletions(-) create mode 100644 client/consensus/aura/src/collator.rs create mode 100644 client/consensus/aura/src/collators/basic.rs create mode 100644 client/consensus/aura/src/collators/lookahead.rs create mode 100644 client/consensus/aura/src/collators/mod.rs create mode 100644 client/consensus/aura/src/equivocation_import_queue.rs delete mode 100644 client/consensus/aura/src/unstable_reimpl.rs create mode 100644 primitives/aura/Cargo.toml create mode 100644 primitives/aura/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 79131725055..67f5f2b7f7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2578,10 +2578,12 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-consensus-proposer", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-relay-chain-interface", "futures", + "lru 0.10.0", "parity-scale-codec", "polkadot-node-primitives", "polkadot-overseer", @@ -2589,6 +2591,7 @@ dependencies = [ "sc-client-api", "sc-consensus", "sc-consensus-aura", + "sc-consensus-babe", "sc-consensus-slots", "sc-telemetry", "sp-api", @@ -2624,11 +2627,14 @@ dependencies = [ "polkadot-primitives", "sc-client-api", "sc-consensus", + "sc-consensus-babe", "schnellru", "sp-blockchain", "sp-consensus", + "sp-consensus-slots", "sp-core", "sp-runtime", + "sp-timestamp", "sp-tracing", "sp-trie", "substrate-prometheus-endpoint", @@ -2927,6 +2933,19 @@ dependencies = [ "xcm", ] +[[package]] +name = "cumulus-primitives-aura" +version = "0.1.0" +dependencies = [ + "parity-scale-codec", + 
"polkadot-core-primitives", + "polkadot-primitives", + "sp-api", + "sp-consensus-aura", + "sp-runtime", + "sp-std", +] + [[package]] name = "cumulus-primitives-core" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index a376d06f806..15aa8614948 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ members = [ "pallets/xcmp-queue", "parachain-template/node", "parachain-template/runtime", + "primitives/aura", "primitives/core", "primitives/parachain-inherent", "primitives/timestamp", diff --git a/client/collator/src/service.rs b/client/collator/src/service.rs index 7724b0a68a6..3125fea4248 100644 --- a/client/collator/src/service.rs +++ b/client/collator/src/service.rs @@ -58,12 +58,18 @@ pub trait ServiceInterface { candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)>; - /// Inform networking systems that the block should be announced after an appropriate - /// signal has been received. This returns the sending half of the signal. + /// Inform networking systems that the block should be announced after a signal has + /// been received to indicate the block has been seconded by a relay-chain validator. + /// + /// This sets up the barrier and returns the sending side of a channel, for the signal + /// to be passed through. fn announce_with_barrier( &self, block_hash: Block::Hash, ) -> oneshot::Sender; + + /// Directly announce a block on the network. + fn announce_block(&self, block_hash: Block::Hash, data: Option>); } /// The [`CollatorService`] provides common utilities for parachain consensus and authoring. 
@@ -74,6 +80,7 @@ pub trait ServiceInterface { pub struct CollatorService { block_status: Arc, wait_to_announce: Arc>>, + announce_block: Arc>) + Send + Sync>, runtime_api: Arc, } @@ -82,6 +89,7 @@ impl Clone for CollatorService { Self { block_status: self.block_status.clone(), wait_to_announce: self.wait_to_announce.clone(), + announce_block: self.announce_block.clone(), runtime_api: self.runtime_api.clone(), } } @@ -101,9 +109,10 @@ where announce_block: Arc>) + Send + Sync>, runtime_api: Arc, ) -> Self { - let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block))); + let wait_to_announce = + Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block.clone()))); - Self { block_status, wait_to_announce, runtime_api } + Self { block_status, wait_to_announce, announce_block, runtime_api } } /// Checks the status of the given block hash in the Parachain. @@ -315,4 +324,8 @@ where ) -> oneshot::Sender { CollatorService::announce_with_barrier(self, block_hash) } + + fn announce_block(&self, block_hash: Block::Hash, data: Option>) { + (self.announce_block)(block_hash, data) + } } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 4ba9ed74a74..c647b258028 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -10,11 +10,13 @@ async-trait = "0.1.71" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } futures = "0.3.28" tracing = "0.1.37" +lru = "0.10.0" # Substrate sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-v1.0.0" } sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } @@ -35,6 +37,7 @@ substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate cumulus-client-consensus-common = { path = "../common" } cumulus-relay-chain-interface = { path = "../../relay-chain-interface" } cumulus-client-consensus-proposer = { path = "../proposer" } +cumulus-primitives-aura = { path = "../../../primitives/aura" } cumulus-primitives-core = { path = "../../../primitives/core" } cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent" } cumulus-client-collator = { path = "../../collator" } diff --git a/client/consensus/aura/src/collator.rs b/client/consensus/aura/src/collator.rs new file mode 100644 index 00000000000..aa990ae6d3a --- /dev/null +++ b/client/consensus/aura/src/collator.rs @@ -0,0 +1,363 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The core collator logic for Aura - slot claiming, block proposing, and collation +//! packaging. +//! +//! The [`Collator`] struct exposed here is meant to be a component of higher-level logic +//! which actually manages the control flow of the collator - which slots to claim, how +//! 
many collations to build, when to work, etc. +//! +//! This module also exposes some standalone functions for common operations when building +//! aura-based collators. + +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ + self as consensus_common, ParachainBlockImportMarker, ParachainCandidate, +}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, +}; +use cumulus_primitives_parachain_inherent::ParachainInherentData; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; +use polkadot_primitives::{Header as PHeader, Id as ParaId}; + +use futures::prelude::*; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; +use sc_consensus_aura::standalone as aura_internal; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_consensus::BlockOrigin; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_keystore::KeystorePtr; +use sp_runtime::{ + generic::Digest, + traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, +}; +use sp_state_machine::StorageChanges; +use sp_timestamp::Timestamp; +use std::{convert::TryFrom, error::Error, hash::Hash, sync::Arc, time::Duration}; + +/// Parameters for instantiating a [`Collator`]. +pub struct Params { + /// A builder for inherent data builders. + pub create_inherent_data_providers: CIDP, + /// The block import handle. + pub block_import: BI, + /// An interface to the relay-chain client. + pub relay_client: Arc, + /// The keystore handle used for accessing parachain key material. 
+ pub keystore: KeystorePtr, + /// The identifier of the parachain within the relay-chain. + pub para_id: ParaId, + /// The block proposer used for building blocks. + pub proposer: Proposer, + /// The collator service used for bundling proposals into collations and announcing + /// to the network. + pub collator_service: CS, +} + +/// A utility struct for writing collation logic that makes use of Aura entirely +/// or in part. See module docs for more details. +pub struct Collator { + create_inherent_data_providers: CIDP, + block_import: BI, + relay_client: Arc, + keystore: KeystorePtr, + para_id: ParaId, + proposer: Proposer, + collator_service: CS, + _marker: std::marker::PhantomData<(Block, P)>, +} + +impl Collator +where + Block: BlockT, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + /// Instantiate a new instance of the `Aura` manager. + pub fn new(params: Params) -> Self { + Collator { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client, + keystore: params.keystore, + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + _marker: std::marker::PhantomData, + } + } + + /// Explicitly creates the inherent data for parachain block authoring and overrides + /// the timestamp inherent data with the one provided, if any. 
+ pub async fn create_inherent_data( + &self, + relay_parent: PHash, + validation_data: &PersistedValidationData, + parent_hash: Block::Hash, + timestamp: impl Into>, + ) -> Result<(ParachainInherentData, InherentData), Box> { + let paras_inherent_data = ParachainInherentData::create_at( + relay_parent, + &self.relay_client, + validation_data, + self.para_id, + ) + .await; + + let paras_inherent_data = match paras_inherent_data { + Some(p) => p, + None => + return Err( + format!("Could not create paras inherent data at {:?}", relay_parent).into() + ), + }; + + let mut other_inherent_data = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .map_err(|e| e as Box) + .await? + .create_inherent_data() + .await + .map_err(Box::new)?; + + if let Some(timestamp) = timestamp.into() { + other_inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp); + } + + Ok((paras_inherent_data, other_inherent_data)) + } + + /// Propose, seal, and import a block, packaging it into a collation. + /// + /// Provide the slot to build at as well as any other necessary pre-digest logs, + /// the inherent data, and the proposal duration and PoV size limits. + /// + /// The Aura pre-digest should not be explicitly provided and is set internally. + /// + /// This does not announce the collation to the parachain network or the relay chain. 
+ pub async fn collate( + &mut self, + parent_header: &Block::Header, + slot_claim: &SlotClaim, + additional_pre_digest: impl Into>>, + inherent_data: (ParachainInherentData, InherentData), + proposal_duration: Duration, + max_pov_size: usize, + ) -> Result<(Collation, ParachainBlockData, Block::Hash), Box> { + let mut digest = additional_pre_digest.into().unwrap_or_default(); + digest.push(slot_claim.pre_digest.clone()); + + let proposal = self + .proposer + .propose( + &parent_header, + &inherent_data.0, + inherent_data.1, + Digest { logs: digest }, + proposal_duration, + Some(max_pov_size), + ) + .await + .map_err(|e| Box::new(e))?; + + let sealed_importable = seal::<_, _, P>( + proposal.block, + proposal.storage_changes, + &slot_claim.author_pub, + &self.keystore, + )?; + + let post_hash = sealed_importable.post_hash(); + let block = Block::new( + sealed_importable.post_header(), + sealed_importable + .body + .as_ref() + .expect("body always created with this `propose` fn; qed") + .clone(), + ); + + self.block_import.import_block(sealed_importable).await?; + + if let Some((collation, block_data)) = self.collator_service.build_collation( + parent_header, + post_hash, + ParachainCandidate { block, proof: proposal.proof }, + ) { + tracing::info!( + target: crate::LOG_TARGET, + "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", + block_data.header().encode().len() as f64 / 1024f64, + block_data.extrinsics().encode().len() as f64 / 1024f64, + block_data.storage_proof().encode().len() as f64 / 1024f64, + ); + + if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { + tracing::info!( + target: crate::LOG_TARGET, + "Compressed PoV size: {}kb", + pov.block_data.0.len() as f64 / 1024f64, + ); + } + + Ok((collation, block_data, post_hash)) + } else { + Err("Unable to produce collation".to_string().into()) + } + } + + /// Get the underlying collator service. 
+ pub fn collator_service(&self) -> &CS { + &self.collator_service + } +} + +/// A claim on an Aura slot. +pub struct SlotClaim { + author_pub: Pub, + pre_digest: DigestItem, + timestamp: Timestamp, +} + +impl SlotClaim { + /// Create a slot-claim from the given author public key, slot, and timestamp. + /// + /// This does not check whether the author actually owns the slot or the timestamp + /// falls within the slot. + pub fn unchecked

(author_pub: Pub, slot: Slot, timestamp: Timestamp) -> Self + where + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, + { + SlotClaim { author_pub, timestamp, pre_digest: aura_internal::pre_digest::

(slot) } + } + + /// Get the author's public key. + pub fn author_pub(&self) -> &Pub { + &self.author_pub + } + + /// Get the Aura pre-digest for this slot. + pub fn pre_digest(&self) -> &DigestItem { + &self.pre_digest + } + + /// Get the timestamp corresponding to the relay-chain slot this claim was + /// generated against. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } +} + +/// Attempt to claim a slot derived from the given relay-parent header's slot. +pub async fn claim_slot( + client: &C, + parent_hash: B::Hash, + relay_parent_header: &PHeader, + slot_duration: SlotDuration, + relay_chain_slot_duration: SlotDuration, + keystore: &KeystorePtr, +) -> Result>, Box> +where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync + 'static, + C::Api: AuraApi, + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, +{ + // load authorities + let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; + + // Determine the current slot and timestamp based on the relay-parent's. + let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( + relay_parent_header, + relay_chain_slot_duration, + ) { + Some((_, t)) => (Slot::from_timestamp(t, slot_duration), t), + None => return Ok(None), + }; + + // Try to claim the slot locally. + let author_pub = { + let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; + match res { + Some(p) => p, + None => return Ok(None), + } + }; + + Ok(Some(SlotClaim::unchecked::

(author_pub, slot_now, timestamp))) +} + +/// Seal a block with a signature in the header. +pub fn seal( + pre_sealed: B, + storage_changes: StorageChanges>, + author_pub: &P::Public, + keystore: &KeystorePtr, +) -> Result, Box> +where + P: Pair, + P::Signature: Encode + Decode + TryFrom>, + P::Public: AppPublic, +{ + let (pre_header, body) = pre_sealed.deconstruct(); + let pre_hash = pre_header.hash(); + let block_number = *pre_header.number(); + + // seal the block. + let block_import_params = { + let seal_digest = + aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; + let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); + block_import_params.post_digests.push(seal_digest); + block_import_params.body = Some(body.clone()); + block_import_params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); + block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block_import_params + }; + let post_hash = block_import_params.post_hash(); + + tracing::info!( + target: crate::LOG_TARGET, + "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + block_number, + post_hash, + pre_hash, + ); + + Ok(block_import_params) +} diff --git a/client/consensus/aura/src/collators/basic.rs b/client/consensus/aura/src/collators/basic.rs new file mode 100644 index 00000000000..e0ba76ac075 --- /dev/null +++ b/client/consensus/aura/src/collators/basic.rs @@ -0,0 +1,200 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! This provides the option to run a basic relay-chain driven Aura implementation. +//! +//! This collator only builds on top of the most recently included block, limiting the +//! block time to a maximum of two times the relay-chain block time, and requiring the +//! block to be built and distributed to validators between two relay-chain blocks. +//! +//! For more information about AuRa, the Substrate crate should be checked. + +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::ParachainBlockImportMarker; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_core::{relay_chain::BlockId as RBlockId, CollectCollationInfo}; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_node_primitives::CollationResult; +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{CollatorPair, Id as ParaId}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; +use sc_consensus::BlockImport; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus::SyncOracle; +use sp_consensus_aura::{AuraApi, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use std::{convert::TryFrom, hash::Hash, sync::Arc, time::Duration}; + +use crate::collator as collator_util; + +/// Parameters for [`run`]. 
+pub struct Params { + pub create_inherent_data_providers: CIDP, + pub block_import: BI, + pub para_client: Arc, + pub relay_client: Arc, + pub sync_oracle: SO, + pub keystore: KeystorePtr, + pub key: CollatorPair, + pub para_id: ParaId, + pub overseer_handle: OverseerHandle, + pub slot_duration: SlotDuration, + pub relay_chain_slot_duration: SlotDuration, + pub proposer: Proposer, + pub collator_service: CS, +} + +/// Run bare Aura consensus as a relay-chain-driven collator. +pub async fn run( + params: Params, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: AuraApi + CollectCollationInfo, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( + params.key, + params.para_id, + params.overseer_handle, + ) + .await; + + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client.clone(), + keystore: params.keystore.clone(), + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + }; + + collator_util::Collator::::new(params) + }; + + while let Some(request) = collation_requests.next().await { + macro_rules! reject_with_error { + ($err:expr) => {{ + request.complete(None); + tracing::error!(target: crate::LOG_TARGET, err = ?{ $err }); + continue; + }}; + } + + macro_rules! 
try_request { + ($x:expr) => {{ + match $x { + Ok(x) => x, + Err(e) => reject_with_error!(e), + } + }}; + } + + let validation_data = request.persisted_validation_data(); + + let parent_header = + try_request!(Block::Header::decode(&mut &validation_data.parent_head.0[..])); + + let parent_hash = parent_header.hash(); + + if !collator.collator_service().check_block_status(parent_hash, &parent_header) { + continue + } + + let relay_parent_header = + match params.relay_client.header(RBlockId::hash(*request.relay_parent())).await { + Err(e) => reject_with_error!(e), + Ok(None) => continue, // sanity: would be inconsistent to get `None` here + Ok(Some(h)) => h, + }; + + let claim = match collator_util::claim_slot::<_, _, P>( + &*params.para_client, + parent_hash, + &relay_parent_header, + params.slot_duration, + params.relay_chain_slot_duration, + ¶ms.keystore, + ) + .await + { + Ok(None) => continue, + Ok(Some(c)) => c, + Err(e) => reject_with_error!(e), + }; + + let (parachain_inherent_data, other_inherent_data) = try_request!( + collator + .create_inherent_data( + *request.relay_parent(), + &validation_data, + parent_hash, + claim.timestamp(), + ) + .await + ); + + let (collation, _, post_hash) = try_request!( + collator + .collate( + &parent_header, + &claim, + None, + (parachain_inherent_data, other_inherent_data), + // TODO [https://github.com/paritytech/cumulus/issues/2439] + // We should call out to a pluggable interface that provides + // the proposal duration. + Duration::from_millis(500), + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. 
+ (validation_data.max_pov_size / 2) as usize, + ) + .await + ); + + let result_sender = Some(collator.collator_service().announce_with_barrier(post_hash)); + request.complete(Some(CollationResult { collation, result_sender })); + } +} diff --git a/client/consensus/aura/src/collators/lookahead.rs b/client/consensus/aura/src/collators/lookahead.rs new file mode 100644 index 00000000000..50fdb8b34ba --- /dev/null +++ b/client/consensus/aura/src/collators/lookahead.rs @@ -0,0 +1,346 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! A collator for Aura that looks ahead of the most recently included parachain block +//! when determining what to build upon. +//! +//! This collator also builds additional blocks when the maximum backlog is not saturated. +//! The size of the backlog is determined by invoking a runtime API. If that runtime API +//! is not supported, this assumes a maximum backlog size of 1. +//! +//! This takes more advantage of asynchronous backing, though not complete advantage. +//! When the backlog is not saturated, this approach lets the backlog temporarily 'catch up' +//! with periods of higher throughput. When the backlog is saturated, we typically +//! fall back to the limited cadence of a single parachain block per relay-chain block. +//! +//! 
Despite this, the fact that there is a backlog at all allows us to spend more time +//! building the block, as there is some buffer before it can get posted to the relay-chain. +//! The main limitation is block propagation time - i.e. the new blocks created by an author +//! must be propagated to the next author before their turn. + +use codec::{Decode, Encode}; +use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; +use cumulus_client_consensus_common::{ + self as consensus_common, ParachainBlockImportMarker, ParentSearchParams, +}; +use cumulus_client_consensus_proposer::ProposerInterface; +use cumulus_primitives_aura::AuraUnincludedSegmentApi; +use cumulus_primitives_core::{ + relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, +}; +use cumulus_relay_chain_interface::RelayChainInterface; + +use polkadot_overseer::Handle as OverseerHandle; +use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption}; + +use futures::prelude::*; +use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; +use sc_consensus::BlockImport; +use sc_consensus_aura::standalone as aura_internal; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::AppPublic; +use sp_blockchain::HeaderBackend; +use sp_consensus::SyncOracle; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::KeystorePtr; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; +use sp_timestamp::Timestamp; +use std::{convert::TryFrom, hash::Hash, sync::Arc, time::Duration}; + +use crate::collator::{self as collator_util, SlotClaim}; + +/// Parameters for [`run`]. 
+pub struct Params { + pub create_inherent_data_providers: CIDP, + pub block_import: BI, + pub para_client: Arc, + pub para_backend: Arc, + pub relay_client: Arc, + pub sync_oracle: SO, + pub keystore: KeystorePtr, + pub key: CollatorPair, + pub para_id: ParaId, + pub overseer_handle: OverseerHandle, + pub slot_duration: SlotDuration, + pub relay_chain_slot_duration: SlotDuration, + pub proposer: Proposer, + pub collator_service: CS, + pub authoring_duration: Duration, +} + +/// Run async-backing-friendly Aura. +pub async fn run( + params: Params, +) where + Block: BlockT, + Client: ProvideRuntimeApi + + BlockOf + + AuxStore + + HeaderBackend + + BlockBackend + + Send + + Sync + + 'static, + Client::Api: + AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi, + Backend: sp_blockchain::Backend, + RClient: RelayChainInterface, + CIDP: CreateInherentDataProviders + 'static, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + Proposer: ProposerInterface, + Proposer::Transaction: Sync, + CS: CollatorServiceInterface, + P: Pair + Send + Sync, + P::Public: AppPublic + Hash + Member + Encode + Decode, + P::Signature: TryFrom> + Hash + Member + Encode + Decode, +{ + // This is an arbitrary value which is likely guaranteed to exceed any reasonable + // limit, as it would correspond to 10 non-included blocks. + // + // Since we only search for parent blocks which have already been imported, + // we can guarantee that all imported blocks respect the unincluded segment + // rules specified by the parachain's runtime and thus will never be too deep. 
+ const PARENT_SEARCH_DEPTH: usize = 10; + + let mut import_notifications = match params.relay_client.import_notification_stream().await { + Ok(s) => s, + Err(err) => { + tracing::error!( + target: crate::LOG_TARGET, + ?err, + "Failed to initialize consensus: no relay chain import notification stream" + ); + + return + }, + }; + + let mut collator = { + let params = collator_util::Params { + create_inherent_data_providers: params.create_inherent_data_providers, + block_import: params.block_import, + relay_client: params.relay_client.clone(), + keystore: params.keystore.clone(), + para_id: params.para_id, + proposer: params.proposer, + collator_service: params.collator_service, + }; + + collator_util::Collator::::new(params) + }; + + while let Some(relay_parent_header) = import_notifications.next().await { + let relay_parent = relay_parent_header.hash(); + + let max_pov_size = match params + .relay_client + .persisted_validation_data( + relay_parent, + params.para_id, + OccupiedCoreAssumption::Included, + ) + .await + { + Ok(None) => continue, + Ok(Some(pvd)) => pvd.max_pov_size, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to gather information from relay-client"); + continue + }, + }; + + let (slot_now, timestamp) = match consensus_common::relay_slot_and_timestamp( + &relay_parent_header, + params.relay_chain_slot_duration, + ) { + None => continue, + Some((_, t)) => (Slot::from_timestamp(t, params.slot_duration), t), + }; + + let parent_search_params = ParentSearchParams { + relay_parent, + para_id: params.para_id, + ancestry_lookback: max_ancestry_lookback(relay_parent, ¶ms.relay_client).await, + max_depth: PARENT_SEARCH_DEPTH, + ignore_alternative_branches: true, + }; + + let potential_parents = cumulus_client_consensus_common::find_potential_parents::( + parent_search_params, + &*params.para_backend, + ¶ms.relay_client, + ) + .await; + + let mut potential_parents = match potential_parents { + Err(e) => { + tracing::error!( + 
target: crate::LOG_TARGET, + ?relay_parent, + err = ?e, + "Could not fetch potential parents to build upon" + ); + + continue + }, + Ok(x) => x, + }; + + let included_block = match potential_parents.iter().find(|x| x.depth == 0) { + None => continue, // also serves as an `is_empty` check. + Some(b) => b.hash, + }; + + let para_client = &*params.para_client; + let keystore = ¶ms.keystore; + let can_build_upon = |block_hash| { + can_build_upon::<_, _, P>( + slot_now, + timestamp, + block_hash, + included_block, + para_client, + &keystore, + ) + }; + + // Sort by depth, ascending, to choose the longest chain. + // + // If the longest chain has space, build upon that. Otherwise, don't + // build at all. + potential_parents.sort_by_key(|a| a.depth); + let initial_parent = match potential_parents.pop() { + None => continue, + Some(p) => p, + }; + + // Build in a loop until not allowed. Note that the authorities can change + // at any block, so we need to re-claim our slot every time. + let mut parent_hash = initial_parent.hash; + let mut parent_header = initial_parent.header; + loop { + let slot_claim = match can_build_upon(parent_hash).await { + None => break, + Some(c) => c, + }; + + let validation_data = PersistedValidationData { + parent_head: parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + // Build and announce collations recursively until + // `can_build_upon` fails or building a collation fails. 
+ let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data( + relay_parent, + &validation_data, + parent_hash, + slot_claim.timestamp(), + ) + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break + }, + Ok(x) => x, + }; + + match collator + .collate( + &parent_header, + &slot_claim, + None, + (parachain_inherent_data, other_inherent_data), + params.authoring_duration, + // Set the block limit to 50% of the maximum PoV size. + // + // TODO: If we got benchmarking that includes the proof size, + // we should be able to use the maximum pov size. + (validation_data.max_pov_size / 2) as usize, + ) + .await + { + Ok((_collation, block_data, new_block_hash)) => { + parent_hash = new_block_hash; + parent_header = block_data.into_header(); + + // Here we are assuming that the import logic protects against equivocations + // and provides sybil-resistance, as it should. + collator.collator_service().announce_block(new_block_hash, None); + + // TODO [https://github.com/paritytech/polkadot/issues/5056]: + // announce collation to relay-chain validators. + }, + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err); + break + }, + } + } + } +} + +// Checks if we own the slot at the given block and whether there +// is space in the unincluded segment. +async fn can_build_upon( + slot: Slot, + timestamp: Timestamp, + parent_hash: Block::Hash, + included_block: Block::Hash, + client: &Client, + keystore: &KeystorePtr, +) -> Option> +where + Client: ProvideRuntimeApi, + Client::Api: AuraApi + AuraUnincludedSegmentApi, + P: Pair, + P::Public: Encode + Decode, + P::Signature: Encode + Decode, +{ + let runtime_api = client.runtime_api(); + let authorities = runtime_api.authorities(parent_hash).ok()?; + let author_pub = aura_internal::claim_slot::

(slot, &authorities, keystore).await?; + + // Here we lean on the property that building on an empty unincluded segment must always + // be legal. Skipping the runtime API query here allows us to seamlessly run this + // collator against chains which have not yet upgraded their runtime. + if parent_hash != included_block { + runtime_api.can_build_upon(parent_hash, included_block, slot).ok()?; + } + + Some(SlotClaim::unchecked::

(author_pub, slot, timestamp)) +} + +async fn max_ancestry_lookback( + _relay_parent: PHash, + _relay_client: &impl RelayChainInterface, +) -> usize { + // TODO [https://github.com/paritytech/cumulus/issues/2706] + // We need to read the relay-chain state to know what the maximum + // age truly is, but that depends on those pallets existing. + // + // For now, just provide the conservative value of '2'. + // Overestimating can cause problems, as we'd be building on forks of the + // chain that can never get included. Underestimating is less of an issue. + 2 +} diff --git a/client/consensus/aura/src/collators/mod.rs b/client/consensus/aura/src/collators/mod.rs new file mode 100644 index 00000000000..55128dfdc85 --- /dev/null +++ b/client/consensus/aura/src/collators/mod.rs @@ -0,0 +1,24 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Stock, pure Aura collators. +//! +//! This includes the [`basic`] collator, which only builds on top of the most recently +//! included parachain block, as well as the [`lookahead`] collator, which prospectively +//! builds on parachain blocks which have not yet been included in the relay chain. 
+ +pub mod basic; +pub mod lookahead; diff --git a/client/consensus/aura/src/equivocation_import_queue.rs b/client/consensus/aura/src/equivocation_import_queue.rs new file mode 100644 index 00000000000..ac9c2a52829 --- /dev/null +++ b/client/consensus/aura/src/equivocation_import_queue.rs @@ -0,0 +1,254 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +/// An import queue which provides some equivocation resistance with lenient trait bounds. +/// +/// Equivocation resistance in general is a hard problem, as different nodes in the network +/// may see equivocations in a different order, and therefore may not agree on which blocks +/// should be thrown out and which ones should be kept. 
+use codec::{Decode, Encode}; +use cumulus_client_consensus_common::ParachainBlockImportMarker; +use lru::LruCache; + +use sc_consensus::{ + import_queue::{BasicQueue, Verifier as VerifierT}, + BlockImport, BlockImportParams, ForkChoiceStrategy, +}; +use sc_consensus_aura::standalone as aura_internal; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_consensus::error::Error as ConsensusError; +use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; +use sp_core::crypto::Pair; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::{fmt::Debug, num::NonZeroUsize, sync::Arc}; + +const LRU_WINDOW: usize = 256; +const EQUIVOCATION_LIMIT: usize = 16; + +struct NaiveEquivocationDefender { + cache: LruCache, +} + +impl Default for NaiveEquivocationDefender { + fn default() -> Self { + NaiveEquivocationDefender { + cache: LruCache::new(NonZeroUsize::new(LRU_WINDOW).expect("window > 0; qed")), + } + } +} + +impl NaiveEquivocationDefender { + // return `true` if equivocation is beyond the limit. 
+ fn insert_and_check(&mut self, slot: Slot) -> bool { + let val = self.cache.get_or_insert_mut(*slot, || 0); + if *val == EQUIVOCATION_LIMIT { + true + } else { + *val += 1; + false + } + } +} + +struct Verifier { + client: Arc, + create_inherent_data_providers: CIDP, + slot_duration: SlotDuration, + defender: NaiveEquivocationDefender, + telemetry: Option, + _marker: std::marker::PhantomData<(Block, P)>, +} + +#[async_trait::async_trait] +impl VerifierT for Verifier +where + P: Pair, + P::Signature: Encode + Decode, + P::Public: Encode + Decode + PartialEq + Clone + Debug, + Block: BlockT, + Client: ProvideRuntimeApi + Send + Sync, + >::Api: BlockBuilderApi + AuraApi, + + CIDP: CreateInherentDataProviders, +{ + async fn verify( + &mut self, + mut block_params: BlockImportParams, + ) -> Result, String> { + // Skip checks that include execution, if being told so, or when importing only state. + // + // This is done for example when gap syncing and it is expected that the block after the gap + // was checked/chosen properly, e.g. by warp syncing to this block using a finality proof. 
+ if block_params.state_action.skip_execution_checks() || block_params.with_state() { + return Ok(block_params) + } + + let post_hash = block_params.header.hash(); + let parent_hash = *block_params.header.parent_hash(); + + // check seal and update pre-hash/post-hash + { + let authorities = aura_internal::fetch_authorities(self.client.as_ref(), parent_hash) + .map_err(|e| { + format!("Could not fetch authorities at {:?}: {}", parent_hash, e) + })?; + + let slot_now = slot_now(self.slot_duration); + let res = aura_internal::check_header_slot_and_seal::( + slot_now, + block_params.header, + &authorities, + ); + + match res { + Ok((pre_header, slot, seal_digest)) => { + telemetry!( + self.telemetry; + CONSENSUS_TRACE; + "aura.checked_and_importing"; + "pre_header" => ?pre_header, + ); + + block_params.header = pre_header; + block_params.post_digests.push(seal_digest); + block_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block_params.post_hash = Some(post_hash); + + // Check for and reject egregious amounts of equivocations. + if self.defender.insert_and_check(slot) { + return Err(format!( + "Rejecting block {:?} due to excessive equivocations at slot", + post_hash, + )) + } + }, + Err(aura_internal::SealVerificationError::Deferred(hdr, slot)) => { + telemetry!( + self.telemetry; + CONSENSUS_DEBUG; + "aura.header_too_far_in_future"; + "hash" => ?post_hash, + "a" => ?hdr, + "b" => ?slot, + ); + + return Err(format!( + "Rejecting block ({:?}) from future slot {:?}", + post_hash, slot + )) + }, + Err(e) => + return Err(format!( + "Rejecting block ({:?}) with invalid seal ({:?})", + post_hash, e + )), + } + } + + // check inherents. 
+ if let Some(body) = block_params.body.clone() { + let block = Block::new(block_params.header.clone(), body); + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| format!("Could not create inherent data {:?}", e))?; + + let inherent_data = create_inherent_data_providers + .create_inherent_data() + .await + .map_err(|e| format!("Could not create inherent data {:?}", e))?; + + let inherent_res = self + .client + .runtime_api() + .check_inherents_with_context( + parent_hash, + block_params.origin.into(), + block, + inherent_data, + ) + .map_err(|e| format!("Unable to check block inherents {:?}", e))?; + + if !inherent_res.ok() { + for (i, e) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&i, &e).await { + Some(res) => res.map_err(|e| format!("Inherent Error {:?}", e))?, + None => + return Err(format!( + "Unknown inherent error, source {:?}", + String::from_utf8_lossy(&i[..]) + )), + } + } + } + } + + Ok(block_params) + } +} + +fn slot_now(slot_duration: SlotDuration) -> Slot { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time().timestamp(); + Slot::from_timestamp(timestamp, slot_duration) +} + +/// Start an import queue for a Cumulus node which checks blocks' seals and inherent data. +/// +/// Pass in only inherent data providers which don't include aura or parachain consensus inherents, +/// e.g. things like timestamp and custom inherents for the runtime. +/// +/// The others are generated explicitly internally. 
+/// +/// This should only be used for runtimes where the runtime does not check all inherents and +/// seals in `execute_block` (see ) +pub fn fully_verifying_import_queue( + client: Arc, + block_import: I, + create_inherent_data_providers: CIDP, + slot_duration: SlotDuration, + spawner: &impl sp_core::traits::SpawnEssentialNamed, + registry: Option<&substrate_prometheus_endpoint::Registry>, + telemetry: Option, +) -> BasicQueue +where + P: Pair, + P::Signature: Encode + Decode, + P::Public: Encode + Decode + PartialEq + Clone + Debug, + I: BlockImport + + ParachainBlockImportMarker + + Send + + Sync + + 'static, + I::Transaction: Send, + Client: ProvideRuntimeApi + Send + Sync + 'static, + >::Api: BlockBuilderApi + AuraApi, + CIDP: CreateInherentDataProviders + 'static, +{ + let verifier = Verifier:: { + client, + create_inherent_data_providers, + defender: NaiveEquivocationDefender::default(), + slot_duration, + telemetry, + _marker: std::marker::PhantomData, + }; + + BasicQueue::new(verifier, Box::new(block_import), None, spawner, registry) +} diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 416ae776da1..51d54bf5714 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -50,7 +50,9 @@ pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, Import pub use sc_consensus_aura::{slot_duration, AuraVerifier, BuildAuraWorkerParams, SlotProportion}; pub use sc_consensus_slots::InherentDataProviderExt; -pub mod unstable_reimpl; +pub mod collator; +pub mod collators; +pub mod equivocation_import_queue; const LOG_TARGET: &str = "aura::cumulus"; diff --git a/client/consensus/aura/src/unstable_reimpl.rs b/client/consensus/aura/src/unstable_reimpl.rs deleted file mode 100644 index f9602a363bf..00000000000 --- a/client/consensus/aura/src/unstable_reimpl.rs +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2023 Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! The AuRa consensus algorithm for parachains. -//! -//! This extends the Substrate provided AuRa consensus implementation to make it compatible for -//! parachains. This provides the option to run a "bare" relay-chain driven Aura implementation, -//! but also exposes the core functionalities separately to be composed into more complex implementations. -//! -//! For more information about AuRa, the Substrate crate should be checked. 
- -use codec::{Decode, Encode}; -use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; -use cumulus_client_consensus_common::{ParachainBlockImportMarker, ParachainCandidate}; -use cumulus_client_consensus_proposer::ProposerInterface; -use cumulus_primitives_core::{ - relay_chain::Hash as PHash, CollectCollationInfo, PersistedValidationData, -}; -use cumulus_primitives_parachain_inherent::ParachainInherentData; -use cumulus_relay_chain_interface::RelayChainInterface; - -use polkadot_node_primitives::{CollationResult, MaybeCompressedPoV}; -use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Id as ParaId}; - -use futures::prelude::*; -use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf}; -use sc_consensus::{ - import_queue::{BasicQueue, Verifier as VerifierT}, - BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction, -}; -use sc_consensus_aura::standalone as aura_internal; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::AppPublic; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::HeaderBackend; -use sp_consensus::{error::Error as ConsensusError, BlockOrigin, SyncOracle}; -use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; -use sp_core::crypto::Pair; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; -use sp_keystore::KeystorePtr; -use sp_runtime::{ - generic::Digest, - traits::{Block as BlockT, HashFor, Header as HeaderT, Member}, -}; -use sp_state_machine::StorageChanges; -use std::{convert::TryFrom, error::Error, fmt::Debug, hash::Hash, sync::Arc, time::Duration}; - -/// Parameters for [`run_bare_relay_driven`]. 
-pub struct Params { - pub create_inherent_data_providers: CIDP, - pub block_import: BI, - pub para_client: Arc, - pub relay_client: Arc, - pub sync_oracle: SO, - pub keystore: KeystorePtr, - pub key: CollatorPair, - pub para_id: ParaId, - pub overseer_handle: OverseerHandle, - pub slot_duration: SlotDuration, - pub proposer: Proposer, - pub collator_service: CS, -} - -/// Run bare Aura consensus as a relay-chain-driven collator. -pub async fn run_bare_relay_driven( - params: Params, -) where - Block: BlockT, - Client: ProvideRuntimeApi - + BlockOf - + AuxStore - + HeaderBackend - + BlockBackend - + Send - + Sync - + 'static, - Client::Api: AuraApi + CollectCollationInfo, - RClient: RelayChainInterface, - CIDP: CreateInherentDataProviders + 'static, - BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone + 'static, - Proposer: ProposerInterface, - Proposer::Transaction: Sync, - CS: CollatorServiceInterface, - P: Pair + Send + Sync, - P::Public: AppPublic + Hash + Member + Encode + Decode, - P::Signature: TryFrom> + Hash + Member + Encode + Decode, -{ - let mut proposer = params.proposer; - let mut block_import = params.block_import; - - let mut collation_requests = cumulus_client_collator::relay_chain_driven::init( - params.key, - params.para_id, - params.overseer_handle, - ) - .await; - - while let Some(request) = collation_requests.next().await { - macro_rules! 
reject_with_error { - ($err:expr) => {{ - request.complete(None); - tracing::error!(target: crate::LOG_TARGET, err = ?{ $err }); - continue; - }}; - } - - let validation_data = request.persisted_validation_data(); - - let parent_header = match Block::Header::decode(&mut &validation_data.parent_head.0[..]) { - Ok(x) => x, - Err(e) => reject_with_error!(e), - }; - - let parent_hash = parent_header.hash(); - - if !params.collator_service.check_block_status(parent_hash, &parent_header) { - continue - } - - let claim = match claim_slot::<_, _, P>( - &*params.para_client, - parent_hash, - params.slot_duration, - ¶ms.keystore, - ) - .await - { - Ok(None) => continue, - Ok(Some(c)) => c, - Err(e) => reject_with_error!(e), - }; - - let (parachain_inherent_data, other_inherent_data) = match create_inherent_data( - *request.relay_parent(), - &validation_data, - parent_hash, - params.para_id, - ¶ms.relay_client, - ¶ms.create_inherent_data_providers, - ) - .await - { - Ok(x) => x, - Err(e) => reject_with_error!(e), - }; - - let proposal = match proposer - .propose( - &parent_header, - ¶chain_inherent_data, - other_inherent_data, - Digest { logs: vec![claim.pre_digest] }, - // TODO [https://github.com/paritytech/cumulus/issues/2439] - // We should call out to a pluggable interface that provides - // the proposal duration. - Duration::from_millis(500), - // Set the block limit to 50% of the maximum PoV size. - // - // TODO: If we got benchmarking that includes the proof size, - // we should be able to use the maximum pov size. 
- Some((validation_data.max_pov_size / 2) as usize), - ) - .await - { - Ok(p) => p, - Err(e) => reject_with_error!(e), - }; - - let sealed_importable = match seal::<_, _, P>( - proposal.block, - proposal.storage_changes, - &claim.author_pub, - ¶ms.keystore, - ) { - Ok(s) => s, - Err(e) => reject_with_error!(e), - }; - - let post_hash = sealed_importable.post_hash(); - let block = Block::new( - sealed_importable.post_header(), - sealed_importable - .body - .as_ref() - .expect("body always created with this `propose` fn; qed") - .clone(), - ); - - if let Err(e) = block_import.import_block(sealed_importable).await { - reject_with_error!(e); - } - - let response = if let Some((collation, b)) = params.collator_service.build_collation( - &parent_header, - post_hash, - ParachainCandidate { block, proof: proposal.proof }, - ) { - tracing::info!( - target: crate::LOG_TARGET, - "PoV size {{ header: {}kb, extrinsics: {}kb, storage_proof: {}kb }}", - b.header().encode().len() as f64 / 1024f64, - b.extrinsics().encode().len() as f64 / 1024f64, - b.storage_proof().encode().len() as f64 / 1024f64, - ); - - if let MaybeCompressedPoV::Compressed(ref pov) = collation.proof_of_validity { - tracing::info!( - target: crate::LOG_TARGET, - "Compressed PoV size: {}kb", - pov.block_data.0.len() as f64 / 1024f64, - ); - } - - let result_sender = params.collator_service.announce_with_barrier(post_hash); - Some(CollationResult { collation, result_sender: Some(result_sender) }) - } else { - None - }; - - request.complete(response); - } -} - -fn slot_now(slot_duration: SlotDuration) -> Slot { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time().timestamp(); - Slot::from_timestamp(timestamp, slot_duration) -} - -/// A claim on an Aura slot. 
-struct SlotClaim { - author_pub: Pub, - pre_digest: sp_runtime::DigestItem, -} - -async fn claim_slot( - client: &C, - parent_hash: B::Hash, - slot_duration: SlotDuration, - keystore: &KeystorePtr, -) -> Result>, Box> -where - B: BlockT, - C: ProvideRuntimeApi + Send + Sync + 'static, - C::Api: AuraApi, - P: Pair, - P::Public: Encode + Decode, - P::Signature: Encode + Decode, -{ - // load authorities - let authorities = client.runtime_api().authorities(parent_hash).map_err(Box::new)?; - - // Determine the current slot. - let slot_now = slot_now(slot_duration); - - // Try to claim the slot locally. - let author_pub = { - let res = aura_internal::claim_slot::

(slot_now, &authorities, keystore).await; - match res { - Some(p) => p, - None => return Ok(None), - } - }; - - // Produce the pre-digest. - let pre_digest = aura_internal::pre_digest::

(slot_now); - - Ok(Some(SlotClaim { author_pub, pre_digest })) -} - -async fn create_inherent_data( - relay_parent: PHash, - validation_data: &PersistedValidationData, - parent_hash: B::Hash, - para_id: ParaId, - relay_chain_interface: &impl RelayChainInterface, - create_inherent_data_providers: &impl CreateInherentDataProviders, -) -> Result<(ParachainInherentData, InherentData), Box> { - let paras_inherent_data = ParachainInherentData::create_at( - relay_parent, - relay_chain_interface, - validation_data, - para_id, - ) - .await; - - let paras_inherent_data = match paras_inherent_data { - Some(p) => p, - None => - return Err(format!("Could not create paras inherent data at {:?}", relay_parent).into()), - }; - - let other_inherent_data = create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .map_err(|e| e as Box) - .await? - .create_inherent_data() - .await - .map_err(Box::new)?; - - Ok((paras_inherent_data, other_inherent_data)) -} - -fn seal( - pre_sealed: B, - storage_changes: StorageChanges>, - author_pub: &P::Public, - keystore: &KeystorePtr, -) -> Result, Box> -where - P: Pair, - P::Signature: Encode + Decode + TryFrom>, - P::Public: AppPublic, -{ - let (pre_header, body) = pre_sealed.deconstruct(); - let pre_hash = pre_header.hash(); - let block_number = *pre_header.number(); - - // seal the block. 
- let block_import_params = { - let seal_digest = - aura_internal::seal::<_, P>(&pre_hash, &author_pub, keystore).map_err(Box::new)?; - let mut block_import_params = BlockImportParams::new(BlockOrigin::Own, pre_header); - block_import_params.post_digests.push(seal_digest); - block_import_params.body = Some(body.clone()); - block_import_params.state_action = - StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - block_import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import_params - }; - let post_hash = block_import_params.post_hash(); - - tracing::info!( - target: crate::LOG_TARGET, - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - block_number, - post_hash, - pre_hash, - ); - - Ok(block_import_params) -} - -struct Verifier { - client: Arc, - create_inherent_data_providers: CIDP, - slot_duration: SlotDuration, - telemetry: Option, - _marker: std::marker::PhantomData<(Block, P)>, -} - -#[async_trait::async_trait] -impl VerifierT for Verifier -where - P: Pair, - P::Signature: Encode + Decode, - P::Public: Encode + Decode + PartialEq + Clone + Debug, - Block: BlockT, - Client: ProvideRuntimeApi + Send + Sync, - >::Api: BlockBuilderApi + AuraApi, - - CIDP: CreateInherentDataProviders, -{ - async fn verify( - &mut self, - mut block_params: BlockImportParams, - ) -> Result, String> { - // Skip checks that include execution, if being told so, or when importing only state. - // - // This is done for example when gap syncing and it is expected that the block after the gap - // was checked/chosen properly, e.g. by warp syncing to this block using a finality proof. 
- if block_params.state_action.skip_execution_checks() || block_params.with_state() { - return Ok(block_params) - } - - let post_hash = block_params.header.hash(); - let parent_hash = *block_params.header.parent_hash(); - - // check seal and update pre-hash/post-hash - { - let authorities = aura_internal::fetch_authorities(self.client.as_ref(), parent_hash) - .map_err(|e| { - format!("Could not fetch authorities at {:?}: {}", parent_hash, e) - })?; - - let slot_now = slot_now(self.slot_duration); - let res = aura_internal::check_header_slot_and_seal::( - slot_now, - block_params.header, - &authorities, - ); - - match res { - Ok((pre_header, _slot, seal_digest)) => { - telemetry!( - self.telemetry; - CONSENSUS_TRACE; - "aura.checked_and_importing"; - "pre_header" => ?pre_header, - ); - - block_params.header = pre_header; - block_params.post_digests.push(seal_digest); - block_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_params.post_hash = Some(post_hash); - }, - Err(aura_internal::SealVerificationError::Deferred(hdr, slot)) => { - telemetry!( - self.telemetry; - CONSENSUS_DEBUG; - "aura.header_too_far_in_future"; - "hash" => ?post_hash, - "a" => ?hdr, - "b" => ?slot, - ); - - return Err(format!( - "Rejecting block ({:?}) from future slot {:?}", - post_hash, slot - )) - }, - Err(e) => - return Err(format!( - "Rejecting block ({:?}) with invalid seal ({:?})", - post_hash, e - )), - } - } - - // check inherents. 
- if let Some(body) = block_params.body.clone() { - let block = Block::new(block_params.header.clone(), body); - let create_inherent_data_providers = self - .create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await - .map_err(|e| format!("Could not create inherent data {:?}", e))?; - - let inherent_data = create_inherent_data_providers - .create_inherent_data() - .await - .map_err(|e| format!("Could not create inherent data {:?}", e))?; - - let inherent_res = self - .client - .runtime_api() - .check_inherents_with_context( - parent_hash, - block_params.origin.into(), - block, - inherent_data, - ) - .map_err(|e| format!("Unable to check block inherents {:?}", e))?; - - if !inherent_res.ok() { - for (i, e) in inherent_res.into_errors() { - match create_inherent_data_providers.try_handle_error(&i, &e).await { - Some(res) => res.map_err(|e| format!("Inherent Error {:?}", e))?, - None => - return Err(format!( - "Unknown inherent error, source {:?}", - String::from_utf8_lossy(&i[..]) - )), - } - } - } - } - - Ok(block_params) - } -} - -/// Start an import queue for a Cumulus node which checks blocks' seals and inherent data. -/// -/// Pass in only inherent data providers which don't include aura or parachain consensus inherents, -/// e.g. things like timestamp and custom inherents for the runtime. -/// -/// The others are generated explicitly internally. 
-/// -/// This should only be used for runtimes where the runtime does not check all inherents and -/// seals in `execute_block` (see ) -pub fn fully_verifying_import_queue( - client: Arc, - block_import: I, - create_inherent_data_providers: CIDP, - slot_duration: SlotDuration, - spawner: &impl sp_core::traits::SpawnEssentialNamed, - registry: Option<&substrate_prometheus_endpoint::Registry>, - telemetry: Option, -) -> BasicQueue -where - P: Pair, - P::Signature: Encode + Decode, - P::Public: Encode + Decode + PartialEq + Clone + Debug, - I: BlockImport - + ParachainBlockImportMarker - + Send - + Sync - + 'static, - I::Transaction: Send, - Client: ProvideRuntimeApi + Send + Sync + 'static, - >::Api: BlockBuilderApi + AuraApi, - CIDP: CreateInherentDataProviders + 'static, -{ - let verifier = Verifier:: { - client, - create_inherent_data_providers, - slot_duration, - telemetry, - _marker: std::marker::PhantomData, - }; - - BasicQueue::new(verifier, Box::new(block_import), None, spawner, registry) -} diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index c41aa8f6d40..b7f1d5ae04f 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -16,10 +16,13 @@ tracing = "0.1.37" # Substrate sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } +sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" } sp-runtime = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" }
+sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" }
 sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" }
 substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v1.0.0" }
diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs
index f3ef4a3023a..48ac4e96344 100644
--- a/client/consensus/common/src/lib.rs
+++ b/client/consensus/common/src/lib.rs
@@ -14,11 +14,22 @@
 // You should have received a copy of the GNU General Public License
 // along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
 
-use polkadot_primitives::{Hash as PHash, PersistedValidationData};
+use codec::Decode;
+use polkadot_primitives::{
+	Block as PBlock, Hash as PHash, Header as PHeader, PersistedValidationData,
+};
+
+use cumulus_primitives_core::{
+	relay_chain::{BlockId as RBlockId, OccupiedCoreAssumption},
+	ParaId,
+};
+use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface};
 
 use sc_client_api::Backend;
 use sc_consensus::{shared_data::SharedData, BlockImport, ImportResult};
+use sp_consensus_slots::{Slot, SlotDuration};
 use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
+use sp_timestamp::Timestamp;
 
 use std::sync::Arc;
 
@@ -181,3 +192,193 @@ where
 pub trait ParachainBlockImportMarker {}
 
 impl<Block: BlockT, BI, BE: Backend<Block>> ParachainBlockImportMarker for ParachainBlockImport<Block, BI, BE> {}
+
+/// Parameters when searching for suitable parents to build on top of.
+pub struct ParentSearchParams {
+	/// The relay-parent that is intended to be used.
+	pub relay_parent: PHash,
+	/// The ID of the parachain.
+	pub para_id: ParaId,
+	/// A limitation on the age of relay parents for parachain blocks that are being
+	/// considered. This is relative to the `relay_parent` number.
+	pub ancestry_lookback: usize,
+	/// How "deep" parents can be relative to the included parachain block at the relay-parent.
+	/// The included block has depth 0.
+	pub max_depth: usize,
+	/// Whether to only ignore "alternative" branches, i.e. branches of the chain
+	/// which do not contain the block pending availability.
+	pub ignore_alternative_branches: bool,
+}
+
+/// A potential parent block returned from [`find_potential_parents`]
+pub struct PotentialParent<B: BlockT> {
+	/// The hash of the block.
+	pub hash: B::Hash,
+	/// The header of the block.
+	pub header: B::Header,
+	/// The depth of the block.
+	pub depth: usize,
+	/// Whether the block is the included block, is itself pending on-chain, or descends
+	/// from the block pending availability.
+	pub aligned_with_pending: bool,
+}
+
+/// Perform a recursive search through blocks to find potential
+/// parent blocks for a new block.
+///
+/// This accepts a relay-chain block to be used as an anchor and a maximum search depth,
+/// along with some arguments for filtering parachain blocks and performs a recursive search
+/// for parachain blocks. The search begins at the last included parachain block and returns
+/// a set of [`PotentialParent`]s which could be potential parents of a new block with this
+/// relay-parent according to the search parameters.
+///
+/// A parachain block is a potential parent if it is either the last included parachain block, the pending
+/// parachain block (when `max_depth` >= 1), or all of the following hold:
+/// * its parent is a potential parent
+/// * its relay-parent is within `ancestry_lookback` of the targeted relay-parent.
+/// * the block number is within `max_depth` blocks of the included block
+pub async fn find_potential_parents<B: BlockT>(
+	params: ParentSearchParams,
+	client: &impl sp_blockchain::Backend<B>,
+	relay_client: &impl RelayChainInterface,
+) -> Result<Vec<PotentialParent<B>>, RelayChainError> {
+	// 1. Build up the ancestry record of the relay chain to compare against.
+	let rp_ancestry = {
+		let mut ancestry = Vec::with_capacity(params.ancestry_lookback + 1);
+		let mut current_rp = params.relay_parent;
+		while ancestry.len() <= params.ancestry_lookback {
+			let header = match relay_client.header(RBlockId::hash(current_rp)).await? {
+				None => break,
+				Some(h) => h,
+			};
+
+			ancestry.push((current_rp, *header.state_root()));
+			current_rp = *header.parent_hash();
+
+			// don't iterate back into the genesis block.
+			if header.number == 1 {
+				break
+			}
+		}
+
+		ancestry
+	};
+
+	let is_hash_in_ancestry = |hash| rp_ancestry.iter().any(|x| x.0 == hash);
+	let is_root_in_ancestry = |root| rp_ancestry.iter().any(|x| x.1 == root);
+
+	// 2. Get the included and pending availability blocks.
+	let included_header = relay_client
+		.persisted_validation_data(
+			params.relay_parent,
+			params.para_id,
+			OccupiedCoreAssumption::TimedOut,
+		)
+		.await?;
+
+	let included_header = match included_header {
+		Some(pvd) => pvd.parent_head,
+		None => return Ok(Vec::new()), // this implies the para doesn't exist.
+	};
+
+	let pending_header = relay_client
+		.persisted_validation_data(
+			params.relay_parent,
+			params.para_id,
+			OccupiedCoreAssumption::Included,
+		)
+		.await?
+		.and_then(|x| if x.parent_head != included_header { Some(x.parent_head) } else { None });
+
+	let included_header = match B::Header::decode(&mut &included_header.0[..]).ok() {
+		None => return Ok(Vec::new()),
+		Some(x) => x,
+	};
+	// Silently swallow if pending block can't decode.
+	let pending_header = pending_header.and_then(|p| B::Header::decode(&mut &p.0[..]).ok());
+	let included_hash = included_header.hash();
+	let pending_hash = pending_header.as_ref().map(|hdr| hdr.hash());
+
+	let mut frontier = vec![PotentialParent::<B> {
+		hash: included_hash,
+		header: included_header,
+		depth: 0,
+		aligned_with_pending: true,
+	}];
+
+	// Recursive search through descendants of the included block which have acceptable
+	// relay parents.
+	let mut potential_parents = Vec::new();
+	while let Some(entry) = frontier.pop() {
+		let is_pending =
+			entry.depth == 1 && pending_hash.as_ref().map_or(false, |h| &entry.hash == h);
+		let is_included = entry.depth == 0;
+
+		// note: even if the pending block or included block have a relay parent
+		// outside of the expected part of the relay chain, they are always allowed
+		// because they have already been posted on chain.
+		let is_potential = is_pending || is_included || {
+			let digest = entry.header.digest();
+			cumulus_primitives_core::extract_relay_parent(digest).map_or(false, is_hash_in_ancestry) ||
+				cumulus_primitives_core::rpsr_digest::extract_relay_parent_storage_root(digest)
+					.map(|(r, _n)| r)
+					.map_or(false, is_root_in_ancestry)
+		};
+
+		let parent_aligned_with_pending = entry.aligned_with_pending;
+		let child_depth = entry.depth + 1;
+		let hash = entry.hash;
+
+		if is_potential {
+			potential_parents.push(entry);
+		}
+
+		if !is_potential || child_depth > params.max_depth {
+			continue
+		}
+
+		// push children onto search frontier.
+		for child in client.children(hash).ok().into_iter().flatten() {
+			let aligned_with_pending = parent_aligned_with_pending &&
+				if child_depth == 1 {
+					pending_hash.as_ref().map_or(true, |h| &child == h)
+				} else {
+					true
+				};
+
+			if params.ignore_alternative_branches && !aligned_with_pending {
+				continue
+			}
+
+			let header = match client.header(child) {
+				Ok(Some(h)) => h,
+				Ok(None) => continue,
+				Err(_) => continue,
+			};
+
+			frontier.push(PotentialParent {
+				hash: child,
+				header,
+				depth: child_depth,
+				aligned_with_pending,
+			});
+		}
+	}
+
+	Ok(potential_parents)
+}
+
+/// Get the relay-parent slot and timestamp from a header.
+pub fn relay_slot_and_timestamp(
+	relay_parent_header: &PHeader,
+	relay_chain_slot_duration: SlotDuration,
+) -> Option<(Slot, Timestamp)> {
+	// The relay chain is BABE; its slot is carried in the BABE pre-digest. Returns `None`
+	// (via `.ok()`) if the header carries no BABE pre-digest, e.g. for a non-BABE chain.
+	sc_consensus_babe::find_pre_digest::<PBlock>(relay_parent_header)
+		.map(|babe_pre_digest| {
+			let slot = babe_pre_digest.slot();
+			// Derive the wall-clock timestamp implied by the slot: slot index times the
+			// relay chain's slot duration in milliseconds.
+			let t = Timestamp::new(relay_chain_slot_duration.as_millis() * *slot);
+
+			(slot, t)
+		})
+		.ok()
+}
diff --git a/client/consensus/common/src/tests.rs b/client/consensus/common/src/tests.rs
index ffb9aaee779..c13f839ad82 100644
--- a/client/consensus/common/src/tests.rs
+++ b/client/consensus/common/src/tests.rs
@@ -186,7 +186,7 @@ impl RelayChainInterface for Relaychain {
 	}
 
 	async fn wait_for_block(&self, _: PHash) -> RelayChainResult<()> {
-		unimplemented!("Not needed for test")
+		Ok(())
 	}
 
 	async fn new_best_notification_stream(
diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs
index e226170d7c5..7783ba13b6c 100644
--- a/client/network/src/lib.rs
+++ b/client/network/src/lib.rs
@@ -17,7 +17,7 @@
 //! Parachain specific networking
 //!
 //! Provides a custom block announcement implementation for parachains
-//! that use the relay chain provided consensus. See [`BlockAnnounceValidator`]
+//! that use the relay chain provided consensus. See [`RequireSecondedInBlockAnnounce`]
 //! and [`WaitToAnnounce`] for more information about this implementation.
 
 use sp_consensus::block_validation::{
@@ -185,8 +185,17 @@ impl TryFrom<&'_ CollationSecondedSignal> for BlockAnnounceData {
 	}
 }
 
+/// A type alias for the [`RequireSecondedInBlockAnnounce`] validator.
+#[deprecated = "This has been renamed to RequireSecondedInBlockAnnounce"]
+pub type BlockAnnounceValidator<Block, RCInterface> =
+	RequireSecondedInBlockAnnounce<Block, RCInterface>;
+
 /// Parachain specific block announce validator.
 ///
+/// This is not required when the collation mechanism itself is sybil-resistant, as it is a spam protection
+/// mechanism used to prevent nodes from dealing with unbounded numbers of blocks.
For sybil-resistant
+/// collation mechanisms, this will only slow things down.
+///
 /// This block announce validator is required if the parachain is running
 /// with the relay chain provided consensus to make sure each node only
 /// imports a reasonable number of blocks per round. The relay chain provided
@@ -213,23 +222,23 @@ impl TryFrom<&'_ CollationSecondedSignal> for BlockAnnounceData {
 /// it. However, if the announcement is for a block below the tip the announcement is accepted
 /// as it probably comes from a node that is currently syncing the chain.
 #[derive(Clone)]
-pub struct BlockAnnounceValidator<Block, RCInterface> {
+pub struct RequireSecondedInBlockAnnounce<Block, RCInterface> {
 	phantom: PhantomData<Block>,
 	relay_chain_interface: RCInterface,
 	para_id: ParaId,
 }
 
-impl<Block: BlockT, RCInterface> BlockAnnounceValidator<Block, RCInterface>
+impl<Block: BlockT, RCInterface> RequireSecondedInBlockAnnounce<Block, RCInterface>
 where
 	RCInterface: Clone,
 {
-	/// Create a new [`BlockAnnounceValidator`].
+	/// Create a new [`RequireSecondedInBlockAnnounce`].
 	pub fn new(relay_chain_interface: RCInterface, para_id: ParaId) -> Self {
 		Self { phantom: Default::default(), relay_chain_interface, para_id }
 	}
 }
 
-impl<Block: BlockT, RCInterface> BlockAnnounceValidator<Block, RCInterface>
+impl<Block: BlockT, RCInterface> RequireSecondedInBlockAnnounce<Block, RCInterface>
 where
 	RCInterface: RelayChainInterface + Clone,
 {
@@ -314,7 +323,7 @@ where
 }
 
 impl<Block: BlockT, RCInterface> BlockAnnounceValidatorT<Block>
-	for BlockAnnounceValidator<Block, RCInterface>
+	for RequireSecondedInBlockAnnounce<Block, RCInterface>
 where
 	RCInterface: RelayChainInterface + Clone + 'static,
 {
@@ -452,3 +461,51 @@ async fn wait_to_announce<Block: BlockT>(
 	);
 }
 }
+
+/// A [`BlockAnnounceValidator`] which accepts all block announcements, as it assumes
+/// sybil resistance is handled elsewhere.
+#[derive(Debug, Clone)]
+pub struct AssumeSybilResistance(bool);
+
+impl AssumeSybilResistance {
+	/// Instantiate this block announcement validator while permissively allowing (but ignoring)
+	/// announcements which come tagged with seconded messages.
+	///
+	/// This is useful for backwards compatibility when upgrading nodes: old nodes will continue
+	/// to broadcast announcements with seconded messages, so these announcements shouldn't be
+	/// rejected and the peers not punished.
+	pub fn allow_seconded_messages() -> Self {
+		AssumeSybilResistance(true)
+	}
+
+	/// Instantiate this block announcement validator while rejecting announcements that come with
+	/// data.
+	pub fn reject_seconded_messages() -> Self {
+		AssumeSybilResistance(false)
+	}
+}
+
+impl<Block: BlockT> BlockAnnounceValidatorT<Block> for AssumeSybilResistance {
+	fn validate(
+		&mut self,
+		_header: &Block::Header,
+		data: &[u8],
+	) -> Pin<Box<dyn Future<Output = Result<Validation, Box<dyn std::error::Error + Send>>> + Send>> {
+		let allow_seconded_messages = self.0;
+		let data = data.to_vec();
+
+		async move {
+			// Empty announcement data is always accepted; announcements never count as
+			// "new best" here since sybil resistance is assumed elsewhere.
+			Ok(if data.is_empty() {
+				Validation::Success { is_new_best: false }
+			} else if !allow_seconded_messages {
+				// Non-empty data while configured to reject: refuse, but don't disconnect.
+				Validation::Failure { disconnect: false }
+			} else {
+				// Permissive mode: accept (and ignore) well-formed seconded messages from
+				// older nodes, but disconnect peers sending undecodable garbage.
+				match BlockAnnounceData::decode_all(&mut data.as_slice()) {
+					Ok(_) => Validation::Success { is_new_best: false },
+					Err(_) => Validation::Failure { disconnect: true },
+				}
+			})
+		}
+		.boxed()
+	}
+}
diff --git a/client/network/src/tests.rs b/client/network/src/tests.rs
index d2def635b5c..e1bb961c75f 100644
--- a/client/network/src/tests.rs
+++ b/client/network/src/tests.rs
@@ -255,11 +255,13 @@ impl RelayChainInterface for DummyRelayChainInterface {
 	}
 }
 
-fn make_validator_and_api(
-) -> (BlockAnnounceValidator<Block, Arc<DummyRelayChainInterface>>, Arc<DummyRelayChainInterface>) {
+fn make_validator_and_api() -> (
+	RequireSecondedInBlockAnnounce<Block, Arc<DummyRelayChainInterface>>,
+	Arc<DummyRelayChainInterface>,
+) {
 	let relay_chain_interface = Arc::new(DummyRelayChainInterface::new());
 	(
-		BlockAnnounceValidator::new(relay_chain_interface.clone(), ParaId::from(56)),
+		RequireSecondedInBlockAnnounce::new(relay_chain_interface.clone(), ParaId::from(56)),
 		relay_chain_interface,
 	)
 }
diff --git a/client/relay-chain-interface/src/lib.rs b/client/relay-chain-interface/src/lib.rs
index 9f7156f90e4..a0258e20632 100644
--- a/client/relay-chain-interface/src/lib.rs
+++
b/client/relay-chain-interface/src/lib.rs @@ -111,7 +111,7 @@ pub trait RelayChainInterface: Send + Sync { /// Get the hash of the current best block. async fn best_block_hash(&self) -> RelayChainResult; - /// Fetch the block header of a given height + /// Fetch the block header of a given hash or height, if it exists. async fn header(&self, block_id: BlockId) -> RelayChainResult>; /// Get the hash of the finalized block. diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 42b9916d468..117e203d1ab 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -20,7 +20,7 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_consensus_common::ParachainConsensus; -use cumulus_client_network::BlockAnnounceValidator; +use cumulus_client_network::RequireSecondedInBlockAnnounce; use cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange, RecoveryHandle}; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; @@ -361,7 +361,8 @@ where _ => None, }; - let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface, para_id); + let block_announce_validator = + RequireSecondedInBlockAnnounce::new(relay_chain_interface, para_id); let block_announce_validator_builder = move |_| Box::new(block_announce_validator) as Box<_>; sc_service::build_network(sc_service::BuildNetworkParams { diff --git a/primitives/aura/Cargo.toml b/primitives/aura/Cargo.toml new file mode 100644 index 00000000000..ca6eadf25f1 --- /dev/null +++ b/primitives/aura/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "cumulus-primitives-aura" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2021" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } + +# Substrate +sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } 
+sp-consensus-aura = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+
+# Polkadot
+# NOTE(review): these dependencies pin `branch = "master"` while the rest of the workspace
+# (e.g. client/consensus/common/Cargo.toml in this same patch) pins `polkadot-v1.0.0` —
+# mixing branches in one workspace duplicates crate versions; confirm this is intentional.
+polkadot-core-primitives = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
+polkadot-primitives = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
+
+[features]
+default = [ "std" ]
+std = [
+	"codec/std",
+	"sp-api/std",
+	"sp-consensus-aura/std",
+	"sp-runtime/std",
+	"sp-std/std",
+	"polkadot-core-primitives/std",
+	"polkadot-primitives/std",
+]
diff --git a/primitives/aura/src/lib.rs b/primitives/aura/src/lib.rs
new file mode 100644
index 00000000000..a0d7a0206a6
--- /dev/null
+++ b/primitives/aura/src/lib.rs
@@ -0,0 +1,50 @@
+// Copyright 2023 Parity Technologies (UK) Ltd.
+// This file is part of Cumulus.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
+
+//! Core primitives for Aura in Cumulus.
+//!
+//! In particular, this exposes the [`AuraUnincludedSegmentApi`] which is used to regulate
+//! the behavior of Aura within a parachain context.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use sp_consensus_aura::Slot;
+
+sp_api::decl_runtime_apis! {
+	/// This runtime API is used to inform potential block authors whether they will
+	/// have the right to author at a slot, assuming they have claimed the slot.
+	///
+	/// In particular, this API allows Aura-based parachains to regulate their "unincluded
+	/// segment", which is the section of the head of the chain which has not yet been made
+	/// available in the relay chain.
+	///
+	/// When the unincluded segment is short, Aura chains will allow authors to create multiple
+	/// blocks per slot in order to build a backlog. When it is saturated, this API will limit
+	/// the amount of blocks that can be created.
+	pub trait AuraUnincludedSegmentApi {
+		/// Whether it is legal to extend the chain, assuming the given block is the most
+		/// recently included one as-of the relay parent that will be built against, and
+		/// the given slot.
+		///
+		/// This should be consistent with the logic the runtime uses when validating blocks to
+		/// avoid issues.
+		///
+		/// When the unincluded segment is empty, i.e. `included_hash == at`, where `at` is the
+		/// block whose state we are querying against, this must always return `true` as long as
+		/// the slot is more recent than the included block itself.
+		fn can_build_upon(included_hash: Block::Hash, slot: Slot) -> bool;
+	}
+}