diff --git a/Cargo.lock b/Cargo.lock index f02918a49abe..c7712363e79d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2378,9 +2378,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] @@ -7890,7 +7890,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ - "bs58 0.5.0", + "bs58 0.5.1", "ed25519-dalek", "hkdf", "multihash 0.19.1", @@ -13641,7 +13641,7 @@ name = "polkadot-node-metrics" version = "7.0.0" dependencies = [ "assert_cmd", - "bs58 0.5.0", + "bs58 0.5.1", "futures", "futures-timer", "http-body-util", @@ -14085,7 +14085,7 @@ dependencies = [ name = "polkadot-runtime-metrics" version = "7.0.0" dependencies = [ - "bs58 0.5.0", + "bs58 0.5.1", "frame-benchmarking", "parity-scale-codec", "polkadot-primitives", @@ -16062,18 +16062,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acde58d073e9c79da00f2b5b84eed919c8326832648a5b109b3fce1bb1175280" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.36", @@ -17974,7 +17974,7 @@ dependencies = [ name = "sc-network-types" version = "0.10.0" dependencies = [ - "bs58 0.5.0", + "bs58 0.5.1", "ed25519-dalek", "libp2p-identity", "litep2p", @@ -19254,7 +19254,7 @@ dependencies = [ "base64 0.21.2", "bip39", "blake2-rfc", - "bs58 0.5.0", + "bs58 0.5.1", "chacha20", "crossbeam-queue", "derive_more", @@ -20099,7 +20099,7 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", - "bs58 0.5.0", + "bs58 0.5.1", "criterion", "dyn-clonable", "ed25519-zebra", diff --git a/Cargo.toml b/Cargo.toml index 1950eb2c8d3a..85f21be900bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -648,7 +648,7 @@ bridge-hub-test-utils = { path = "cumulus/parachains/runtimes/bridge-hubs/test-u bridge-hub-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend" } bridge-hub-westend-runtime = { path = "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } bridge-runtime-common = { path = "bridges/bin/runtime-common", default-features = false } -bs58 = { version = "0.5.0", default-features = false } +bs58 = { version = "0.5.1", default-features = false } build-helper = { version = "0.1.1" } byte-slice-cast = { version = "1.2.1", default-features = false } byteorder = { version = "1.3.2", default-features = false } @@ -1086,7 +1086,7 @@ rand_distr = { version = "0.4.3" } rand_pcg = { version = "0.3.1" } rayon = { version = "1.5.1" } rbtag = { version = "0.3" } -ref-cast = { version = "1.0.0" } +ref-cast = { version = "1.0.23" } regex = { version = "1.10.2" } relay-substrate-client 
= { path = "bridges/relays/client-substrate" } relay-utils = { path = "bridges/relays/utils" } diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index d48fb08a311c..3462aaef1f69 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -320,7 +320,7 @@ enum Resend { /// It tracks metadata about our view of the unfinalized chain, /// which assignments and approvals we have seen, and our peers' views. #[derive(Default)] -struct State { +pub struct State { /// These two fields are used in conjunction to construct a view over the unfinalized chain. blocks_by_number: BTreeMap>, blocks: HashMap, @@ -662,9 +662,13 @@ enum PendingMessage { #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] impl State { - async fn handle_network_msg( + async fn handle_network_msg< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, event: NetworkBridgeEvent, rng: &mut (impl CryptoRng + Rng), @@ -689,7 +693,7 @@ impl State { }, NetworkBridgeEvent::NewGossipTopology(topology) => { self.handle_new_session_topology( - ctx, + network_sender, topology.session, topology.topology, topology.local_index, @@ -697,7 +701,7 @@ impl State { .await; }, NetworkBridgeEvent::PeerViewChange(peer_id, view) => { - self.handle_peer_view_change(ctx, metrics, peer_id, view, rng).await; + self.handle_peer_view_change(network_sender, metrics, peer_id, view, rng).await; }, NetworkBridgeEvent::OurViewChange(view) => { gum::trace!(target: LOG_TARGET, ?view, "Own view change"); @@ -720,7 +724,15 @@ impl State { }); }, NetworkBridgeEvent::PeerMessage(peer_id, message) => { - self.process_incoming_peer_message(ctx, metrics, peer_id, message, rng).await; + self.process_incoming_peer_message( + approval_voting_sender, + network_sender, + metrics, + peer_id, + message, + rng, + ) + .await; }, NetworkBridgeEvent::UpdatedAuthorityIds(peer_id, authority_ids) => { gum::debug!(target: LOG_TARGET, ?peer_id, ?authority_ids, "Update Authority Ids"); @@ -743,7 +755,7 @@ impl State { let view_intersection = View::new(intersection.cloned(), view.finalized_number); Self::unify_with_peer( - ctx.sender(), + network_sender, metrics, &mut self.blocks, &self.topologies, @@ -761,9 +773,13 @@ impl State { } } - async fn handle_new_blocks( + async fn handle_new_blocks< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, metas: Vec, rng: &mut (impl CryptoRng + Rng), @@ -814,12 +830,11 @@ impl State { ); { - let sender = ctx.sender(); for (peer_id, PeerEntry { view, version }) in self.peer_views.iter() { let intersection = view.iter().filter(|h| new_hashes.contains(h)); let view_intersection = View::new(intersection.cloned(), view.finalized_number); Self::unify_with_peer( - sender, + network_sender, metrics, &mut self.blocks, &self.topologies, @@ -866,7 +881,8 @@ impl State { match message { PendingMessage::Assignment(assignment, claimed_indices) => { self.import_and_circulate_assignment( - ctx, + approval_voting_sender, + network_sender, metrics, MessageSource::Peer(peer_id), assignment, @@ -877,7 +893,8 @@ impl State { }, PendingMessage::Approval(approval_vote) => { self.import_and_circulate_approval( - ctx, + approval_voting_sender, + 
network_sender, metrics, MessageSource::Peer(peer_id), approval_vote, @@ -889,12 +906,12 @@ impl State { } } - self.enable_aggression(ctx, Resend::Yes, metrics).await; + self.enable_aggression(network_sender, Resend::Yes, metrics).await; } - async fn handle_new_session_topology( + async fn handle_new_session_topology>( &mut self, - ctx: &mut Context, + network_sender: &mut N, session: SessionIndex, topology: SessionGridTopology, local_index: Option, @@ -908,7 +925,7 @@ impl State { let topology = self.topologies.get_topology(session).expect("just inserted above; qed"); adjust_required_routing_and_propagate( - ctx, + network_sender, &mut self.blocks, &self.topologies, |block_entry| block_entry.session == session, @@ -926,14 +943,17 @@ impl State { .await; } - async fn process_incoming_assignments( + async fn process_incoming_assignments( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, peer_id: PeerId, assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, rng: &mut R, ) where + A: overseer::SubsystemSender, + N: overseer::SubsystemSender, R: CryptoRng + Rng, { for (assignment, claimed_indices) in assignments { @@ -956,7 +976,8 @@ impl State { } self.import_and_circulate_assignment( - ctx, + approval_voting_sender, + network_sender, metrics, MessageSource::Peer(peer_id), assignment, @@ -968,9 +989,13 @@ impl State { } // Entry point for processing an approval coming from a peer. - async fn process_incoming_approvals( + async fn process_incoming_approvals< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, peer_id: PeerId, approvals: Vec, @@ -1001,7 +1026,8 @@ impl State { } self.import_and_circulate_approval( - ctx, + approval_voting_sender, + network_sender, metrics, MessageSource::Peer(peer_id), approval_vote, @@ -1010,9 +1036,10 @@ impl State { } } - async fn process_incoming_peer_message( + async fn process_incoming_peer_message( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, peer_id: PeerId, msg: Versioned< @@ -1022,6 +1049,8 @@ impl State { >, rng: &mut R, ) where + A: overseer::SubsystemSender, + N: overseer::SubsystemSender, R: CryptoRng + Rng, { match msg { @@ -1033,10 +1062,11 @@ impl State { "Processing assignments from a peer", ); let sanitized_assignments = - self.sanitize_v2_assignments(peer_id, ctx.sender(), assignments).await; + self.sanitize_v2_assignments(peer_id, network_sender, assignments).await; self.process_incoming_assignments( - ctx, + approval_voting_sender, + network_sender, metrics, peer_id, sanitized_assignments, @@ -1054,10 +1084,11 @@ impl State { ); let sanitized_assignments = - self.sanitize_v1_assignments(peer_id, ctx.sender(), assignments).await; + self.sanitize_v1_assignments(peer_id, network_sender, assignments).await; self.process_incoming_assignments( - ctx, + approval_voting_sender, + network_sender, metrics, peer_id, sanitized_assignments, @@ -1067,25 +1098,37 @@ impl State { }, Versioned::V3(protocol_v3::ApprovalDistributionMessage::Approvals(approvals)) => { let sanitized_approvals = - self.sanitize_v2_approvals(peer_id, ctx.sender(), approvals).await; - self.process_incoming_approvals(ctx, metrics, peer_id, sanitized_approvals) - .await; + self.sanitize_v2_approvals(peer_id, network_sender, approvals).await; + self.process_incoming_approvals( + approval_voting_sender, + network_sender, + 
metrics, + peer_id, + sanitized_approvals, + ) + .await; }, Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | Versioned::V2(protocol_v2::ApprovalDistributionMessage::Approvals(approvals)) => { let sanitized_approvals = - self.sanitize_v1_approvals(peer_id, ctx.sender(), approvals).await; - self.process_incoming_approvals(ctx, metrics, peer_id, sanitized_approvals) - .await; + self.sanitize_v1_approvals(peer_id, network_sender, approvals).await; + self.process_incoming_approvals( + approval_voting_sender, + network_sender, + metrics, + peer_id, + sanitized_approvals, + ) + .await; }, } } // handle a peer view change: requires that the peer is already connected // and has an entry in the `PeerData` struct. - async fn handle_peer_view_change( + async fn handle_peer_view_change, R>( &mut self, - ctx: &mut Context, + network_sender: &mut N, metrics: &Metrics, peer_id: PeerId, view: View, @@ -1132,7 +1175,7 @@ impl State { } Self::unify_with_peer( - ctx.sender(), + network_sender, metrics, &mut self.blocks, &self.topologies, @@ -1146,9 +1189,9 @@ impl State { .await; } - async fn handle_block_finalized( + async fn handle_block_finalized>( &mut self, - ctx: &mut Context, + network_sender: &mut N, metrics: &Metrics, finalized_number: BlockNumber, ) { @@ -1172,18 +1215,21 @@ impl State { // If a block was finalized, this means we may need to move our aggression // forward to the now oldest block(s). - self.enable_aggression(ctx, Resend::No, metrics).await; + self.enable_aggression(network_sender, Resend::No, metrics).await; } - async fn import_and_circulate_assignment( + async fn import_and_circulate_assignment( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, source: MessageSource, assignment: IndirectAssignmentCertV2, claimed_candidate_indices: CandidateBitfield, rng: &mut R, ) where + A: overseer::SubsystemSender, + N: overseer::SubsystemSender, R: CryptoRng + Rng, { let _span = self @@ -1218,7 +1264,7 @@ impl State { if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_UNEXPECTED_MESSAGE, ) @@ -1255,7 +1301,7 @@ impl State { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_DUPLICATE_MESSAGE, ) @@ -1283,7 +1329,7 @@ impl State { ); modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_UNEXPECTED_MESSAGE, ) @@ -1296,7 +1342,7 @@ impl State { if entry.knowledge.contains(&message_subject, message_kind) { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, BENEFIT_VALID_MESSAGE, ) @@ -1311,12 +1357,13 @@ impl State { let (tx, rx) = oneshot::channel(); - ctx.send_message(ApprovalVotingMessage::CheckAndImportAssignment( - assignment.clone(), - claimed_candidate_indices.clone(), - tx, - )) - .await; + approval_voting_sender + .send_message(ApprovalVotingMessage::CheckAndImportAssignment( + assignment.clone(), + claimed_candidate_indices.clone(), + tx, + )) + .await; let timer = metrics.time_awaiting_approval_voting(); let result = match rx.await { @@ -1339,7 +1386,7 @@ impl State { AssignmentCheckResult::Accepted => { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, BENEFIT_VALID_MESSAGE_FIRST, ) @@ -1375,7 +1422,7 @@ impl State { ); modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE, ) @@ -1394,7 
+1441,7 @@ impl State { ); modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_INVALID_MESSAGE, ) @@ -1526,14 +1573,16 @@ impl State { }) .collect::>(); - send_assignments_batched(ctx.sender(), assignments, &peers).await; + send_assignments_batched(network_sender, assignments, &peers).await; } } // Checks if an approval can be processed. // Returns true if we can continue with processing the approval and false otherwise. - async fn check_approval_can_be_processed( - ctx: &mut Context, + async fn check_approval_can_be_processed< + N: overseer::SubsystemSender, + >( + network_sender: &mut N, assignments_knowledge_key: &Vec<(MessageSubject, MessageKind)>, approval_knowledge_key: &(MessageSubject, MessageKind), entry: &mut BlockEntry, @@ -1549,7 +1598,8 @@ impl State { ?message_subject, "Unknown approval assignment", ); - modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + modify_reputation(reputation, network_sender, peer_id, COST_UNEXPECTED_MESSAGE) + .await; metrics.on_approval_unknown_assignment(); return false } @@ -1573,7 +1623,7 @@ impl State { modify_reputation( reputation, - ctx.sender(), + network_sender, peer_id, COST_DUPLICATE_MESSAGE, ) @@ -1590,7 +1640,8 @@ impl State { ?approval_knowledge_key, "Approval from a peer is out of view", ); - modify_reputation(reputation, ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; + modify_reputation(reputation, network_sender, peer_id, COST_UNEXPECTED_MESSAGE) + .await; metrics.on_approval_out_of_view(); }, } @@ -1605,16 +1656,20 @@ impl State { // We already processed this approval no need to continue. gum::trace!(target: LOG_TARGET, ?peer_id, ?approval_knowledge_key, "Known approval"); metrics.on_approval_good_known(); - modify_reputation(reputation, ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; + modify_reputation(reputation, network_sender, peer_id, BENEFIT_VALID_MESSAGE).await; false } else { true } } - async fn import_and_circulate_approval( + async fn import_and_circulate_approval< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( &mut self, - ctx: &mut Context, + approval_voting_sender: &mut A, + network_sender: &mut N, metrics: &Metrics, source: MessageSource, vote: IndirectSignedApprovalVoteV2, @@ -1652,7 +1707,7 @@ impl State { ); modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_UNEXPECTED_MESSAGE, ) @@ -1672,7 +1727,7 @@ impl State { if let Some(peer_id) = source.peer_id() { if !Self::check_approval_can_be_processed( - ctx, + network_sender, &assignments_knowledge_keys, &approval_knwowledge_key, entry, @@ -1687,7 +1742,8 @@ impl State { let (tx, rx) = oneshot::channel(); - ctx.send_message(ApprovalVotingMessage::CheckAndImportApproval(vote.clone(), tx)) + approval_voting_sender + .send_message(ApprovalVotingMessage::CheckAndImportApproval(vote.clone(), tx)) .await; let timer = metrics.time_awaiting_approval_voting(); @@ -1711,7 +1767,7 @@ impl State { ApprovalCheckResult::Accepted => { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, BENEFIT_VALID_MESSAGE_FIRST, ) @@ -1729,7 +1785,7 @@ impl State { ApprovalCheckResult::Bad(error) => { modify_reputation( &mut self.reputation, - ctx.sender(), + network_sender, peer_id, COST_INVALID_MESSAGE, ) @@ -1831,7 +1887,7 @@ impl State { num_peers = peers.len(), "Sending an approval to peers", ); - send_approvals_batched(ctx.sender(), approvals, &peers).await; + send_approvals_batched(network_sender, approvals, 
&peers).await; } } @@ -1882,7 +1938,7 @@ impl State { } async fn unify_with_peer( - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, metrics: &Metrics, entries: &mut HashMap, topologies: &SessionGridTopologies, @@ -2027,9 +2083,9 @@ impl State { // // In order to switch to using approval lag as a trigger we need a request/response protocol // to fetch votes from validators rather than use gossip. - async fn enable_aggression( + async fn enable_aggression>( &mut self, - ctx: &mut Context, + network_sender: &mut N, resend: Resend, metrics: &Metrics, ) { @@ -2058,7 +2114,7 @@ impl State { gum::debug!(target: LOG_TARGET, min_age, max_age, "Aggression enabled",); adjust_required_routing_and_propagate( - ctx, + network_sender, &mut self.blocks, &self.topologies, |block_entry| { @@ -2086,7 +2142,7 @@ impl State { .await; adjust_required_routing_and_propagate( - ctx, + network_sender, &mut self.blocks, &self.topologies, |block_entry| { @@ -2137,7 +2193,7 @@ impl State { async fn sanitize_v1_assignments( &mut self, peer_id: PeerId, - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { let mut sanitized_assignments = Vec::new(); @@ -2172,7 +2228,7 @@ impl State { async fn sanitize_v2_assignments( &mut self, peer_id: PeerId, - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, assignments: Vec<(IndirectAssignmentCertV2, CandidateBitfield)>, ) -> Vec<(IndirectAssignmentCertV2, CandidateBitfield)> { let mut sanitized_assignments = Vec::new(); @@ -2216,7 +2272,7 @@ impl State { async fn sanitize_v1_approvals( &mut self, peer_id: PeerId, - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, approval: Vec, ) -> Vec { let mut sanitized_approvals = Vec::new(); @@ -2243,7 +2299,7 @@ impl State { async fn sanitize_v2_approvals( &mut self, peer_id: PeerId, - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, approval: Vec, ) -> Vec { let mut sanitized_approvals = Vec::new(); @@ -2280,8 +2336,12 @@ impl State { // Note that the required routing of a message can be modified even if the // topology is unknown yet. #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] -async fn adjust_required_routing_and_propagate( - ctx: &mut Context, +async fn adjust_required_routing_and_propagate< + N: overseer::SubsystemSender, + BlockFilter, + RoutingModifier, +>( + network_sender: &mut N, blocks: &mut HashMap, topologies: &SessionGridTopologies, block_filter: BlockFilter, @@ -2363,7 +2423,7 @@ async fn adjust_required_routing_and_propagate, peer_id: PeerId, rep: Rep, ) { @@ -2414,7 +2474,6 @@ impl ApprovalDistribution { async fn run(self, ctx: Context) { let mut state = State::default(); - // According to the docs of `rand`, this is a ChaCha12 RNG in practice // and will always be chosen for strong performance and security properties. let mut rng = rand::rngs::StdRng::from_entropy(); @@ -2431,7 +2490,8 @@ impl ApprovalDistribution { ) { let new_reputation_delay = || futures_timer::Delay::new(reputation_interval).fuse(); let mut reputation_delay = new_reputation_delay(); - + let mut approval_voting_sender = ctx.sender().clone(); + let mut network_sender = ctx.sender().clone(); loop { select! 
{ _ = reputation_delay => { @@ -2446,35 +2506,65 @@ impl ApprovalDistribution { return }, }; - match message { - FromOrchestra::Communication { msg } => - Self::handle_incoming(&mut ctx, state, msg, &self.metrics, rng).await, - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); - // the relay chain blocks relevant to the approval subsystems - // are those that are available, but not finalized yet - // activated and deactivated heads hence are irrelevant to this subsystem, other than - // for tracing purposes. - if let Some(activated) = update.activated { - let head = activated.hash; - let approval_distribution_span = - jaeger::PerLeafSpan::new(activated.span, "approval-distribution"); - state.spans.insert(head, approval_distribution_span); - } - }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, number)) => { - gum::trace!(target: LOG_TARGET, number = %number, "finalized signal"); - state.handle_block_finalized(&mut ctx, &self.metrics, number).await; - }, - FromOrchestra::Signal(OverseerSignal::Conclude) => return, - } + + + self.handle_from_orchestra(message, &mut approval_voting_sender, &mut network_sender, state, rng).await; + }, } } } - async fn handle_incoming( - ctx: &mut Context, + /// Handles a from orchestra message received by approval distribution subystem. + pub async fn handle_from_orchestra< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( + &self, + message: FromOrchestra, + approval_voting_sender: &mut A, + network_sender: &mut N, + state: &mut State, + rng: &mut (impl CryptoRng + Rng), + ) { + match message { + FromOrchestra::Communication { msg } => + Self::handle_incoming( + approval_voting_sender, + network_sender, + state, + msg, + &self.metrics, + rng, + ) + .await, + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "active leaves signal (ignored)"); + // the relay chain blocks relevant to the approval subsystems + // are those that are available, but not finalized yet + // activated and deactivated heads hence are irrelevant to this subsystem, other + // than for tracing purposes. 
+ if let Some(activated) = update.activated { + let head = activated.hash; + let approval_distribution_span = + jaeger::PerLeafSpan::new(activated.span, "approval-distribution"); + state.spans.insert(head, approval_distribution_span); + } + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, number)) => { + gum::trace!(target: LOG_TARGET, number = %number, "finalized signal"); + state.handle_block_finalized(network_sender, &self.metrics, number).await; + }, + FromOrchestra::Signal(OverseerSignal::Conclude) => return, + } + } + + async fn handle_incoming< + N: overseer::SubsystemSender, + A: overseer::SubsystemSender, + >( + approval_voting_sender: &mut A, + network_sender: &mut N, state: &mut State, msg: ApprovalDistributionMessage, metrics: &Metrics, @@ -2482,10 +2572,14 @@ impl ApprovalDistribution { ) { match msg { ApprovalDistributionMessage::NetworkBridgeUpdate(event) => { - state.handle_network_msg(ctx, metrics, event, rng).await; + state + .handle_network_msg(approval_voting_sender, network_sender, metrics, event, rng) + .await; }, ApprovalDistributionMessage::NewBlocks(metas) => { - state.handle_new_blocks(ctx, metrics, metas, rng).await; + state + .handle_new_blocks(approval_voting_sender, network_sender, metrics, metas, rng) + .await; }, ApprovalDistributionMessage::DistributeAssignment(cert, candidate_indices) => { let _span = state @@ -2506,7 +2600,8 @@ impl ApprovalDistribution { state .import_and_circulate_assignment( - ctx, + approval_voting_sender, + network_sender, &metrics, MessageSource::Local, cert, @@ -2524,7 +2619,13 @@ impl ApprovalDistribution { ); state - .import_and_circulate_approval(ctx, metrics, MessageSource::Local, vote) + .import_and_circulate_approval( + approval_voting_sender, + network_sender, + metrics, + MessageSource::Local, + vote, + ) .await; }, ApprovalDistributionMessage::GetApprovalSignatures(indices, tx) => { @@ -2579,7 +2680,7 @@ pub const MAX_APPROVAL_BATCH_SIZE: usize = ensure_size_not_zero( // Low level helper for sending assignments. async fn send_assignments_batched_inner( - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, batch: impl IntoIterator, peers: Vec, peer_version: ValidationVersion, @@ -2634,7 +2735,7 @@ async fn send_assignments_batched_inner( /// destination, such that the subsystem doesn't get stuck for long processing a batch /// of assignments and can `select!` other tasks. 
pub(crate) async fn send_assignments_batched( - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + network_sender: &mut impl overseer::SubsystemSender, v2_assignments: impl IntoIterator + Clone, peers: &[(PeerId, ProtocolVersion)], ) { @@ -2658,7 +2759,7 @@ pub(crate) async fn send_assignments_batched( let batch: Vec<_> = v1_batches.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect(); if !v1_peers.is_empty() { send_assignments_batched_inner( - sender, + network_sender, batch.clone(), v1_peers.clone(), ValidationVersion::V1, @@ -2668,7 +2769,7 @@ pub(crate) async fn send_assignments_batched( if !v2_peers.is_empty() { send_assignments_batched_inner( - sender, + network_sender, batch, v2_peers.clone(), ValidationVersion::V2, @@ -2683,15 +2784,20 @@ pub(crate) async fn send_assignments_batched( while v3.peek().is_some() { let batch = v3.by_ref().take(MAX_ASSIGNMENT_BATCH_SIZE).collect::>(); - send_assignments_batched_inner(sender, batch, v3_peers.clone(), ValidationVersion::V3) - .await; + send_assignments_batched_inner( + network_sender, + batch, + v3_peers.clone(), + ValidationVersion::V3, + ) + .await; } } } /// Send approvals while honoring the `max_notification_size` of the protocol and peer version. pub(crate) async fn send_approvals_batched( - sender: &mut impl overseer::ApprovalDistributionSenderTrait, + sender: &mut impl overseer::SubsystemSender, approvals: impl IntoIterator + Clone, peers: &[(PeerId, ProtocolVersion)], ) { diff --git a/prdoc/pr_4845.prdoc b/prdoc/pr_4845.prdoc new file mode 100644 index 000000000000..012d34ef090e --- /dev/null +++ b/prdoc/pr_4845.prdoc @@ -0,0 +1,13 @@ +title: Make approval-distribution logic runnable on a separate thread + +doc: + - audience: Node Dev + description: | + Pass SubsystemSender trait inside approval-distribution instead of passing SubsystemContext everywhere. + + This allows us in the future to be able to run multiple approval-distribution instances on different workers. + + +crates: +- name: polkadot-approval-distribution + bump: minor diff --git a/prdoc/pr_5036.prdoc b/prdoc/pr_5036.prdoc new file mode 100644 index 000000000000..e9f21f823b64 --- /dev/null +++ b/prdoc/pr_5036.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "[pallet_contracts] Modify the storage host function benchmarks to be run on an unbalanced storage trie" + +doc: + - audience: Runtime User + description: | + This PR modifies the storage host function benchmarks. Previously, they were run + on an empty storage trie. Now, they are run on an unbalanced storage trie + to reflect the worst-case scenario. This approach increases the storage host + function weights and decreases the probability of DoS attacks. + +crates: + - name: pallet-contracts + bump: patch diff --git a/prdoc/pr_5153.prdoc b/prdoc/pr_5153.prdoc new file mode 100644 index 000000000000..4f43b52d8edf --- /dev/null +++ b/prdoc/pr_5153.prdoc @@ -0,0 +1,12 @@ +title: "Grandpa: Ensure voting doesn't fail after a re-org" + +doc: + - audience: Node Operator + description: | + Ensures that a node is still able to vote with Grandpa, when a re-org happened that + changed the best chain. This ultimately prevents that a network may runs into a + potential finality stall. 
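For a concrete picture of the pr_4845 refactor, a minimal self-contained sketch of the same pattern follows. It is not code from the patch, and `NetMsg`, `VotingMsg` and `handle_event` are hypothetical stand-ins: a handler that previously took the whole subsystem context becomes generic over narrow sender handles, so the senders can be cloned out of the context and moved to a worker running on its own thread.

```rust
use std::sync::mpsc::{channel, Sender};
use std::thread;

// Hypothetical message types standing in for the network-bridge and
// approval-voting messages the real subsystem sends.
#[derive(Debug)]
enum NetMsg { SendToPeers(&'static str) }
#[derive(Debug)]
enum VotingMsg { CheckAndImport(&'static str) }

// Instead of taking a whole subsystem context, the handler only receives the
// two senders it actually needs, so it no longer cares where it runs.
fn handle_event(
    approval_voting_sender: &Sender<VotingMsg>,
    network_sender: &Sender<NetMsg>,
    event: &'static str,
) {
    // A real handler would validate the event first; this sketch only
    // demonstrates the "two narrow senders" calling convention.
    approval_voting_sender.send(VotingMsg::CheckAndImport(event)).unwrap();
    network_sender.send(NetMsg::SendToPeers(event)).unwrap();
}

fn main() {
    let (net_tx, net_rx) = channel();
    let (vote_tx, vote_rx) = channel();

    // Because the handler only needs the senders, it can run on a separate
    // worker thread while the receiving ends stay with the "overseer".
    let worker = thread::spawn(move || {
        handle_event(&vote_tx, &net_tx, "assignment-from-peer");
    });
    worker.join().unwrap();

    println!("{:?}", vote_rx.recv().unwrap()); // CheckAndImport("assignment-from-peer")
    println!("{:?}", net_rx.recv().unwrap());  // SendToPeers("assignment-from-peer")
}
```

In the subsystem itself the two handles are `overseer::SubsystemSender` implementations cloned from `ctx.sender()` in the run loop, and `handle_from_orchestra` is the entry point that now accepts them.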
+ +crates: + - name: sc-consensus-grandpa + bump: patch diff --git a/substrate/client/consensus/beefy/rpc/src/lib.rs b/substrate/client/consensus/beefy/rpc/src/lib.rs index 66102eeb35c8..83477d223dd2 100644 --- a/substrate/client/consensus/beefy/rpc/src/lib.rs +++ b/substrate/client/consensus/beefy/rpc/src/lib.rs @@ -24,7 +24,10 @@ use parking_lot::RwLock; use sp_consensus_beefy::AuthorityIdBound; use std::sync::Arc; -use sc_rpc::{utils::pipe_from_stream, SubscriptionTaskExecutor}; +use sc_rpc::{ + utils::{BoundedVecDeque, PendingSubscription}, + SubscriptionTaskExecutor, +}; use sp_application_crypto::RuntimeAppPublic; use sp_runtime::traits::Block as BlockT; @@ -145,7 +148,10 @@ where .subscribe(100_000) .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); - sc_rpc::utils::spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); + sc_rpc::utils::spawn_subscription_task( + &self.executor, + PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()), + ); } async fn latest_finalized(&self) -> Result { diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index 430525019dfb..a41b14299089 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -38,7 +38,10 @@ use finality::{EncodedFinalityProof, RpcFinalityProofProvider}; use notification::JustificationNotification; use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; use sc_consensus_grandpa::GrandpaJustificationStream; -use sc_rpc::{utils::pipe_from_stream, SubscriptionTaskExecutor}; +use sc_rpc::{ + utils::{BoundedVecDeque, PendingSubscription}, + SubscriptionTaskExecutor, +}; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// Provides RPC methods for interacting with GRANDPA. @@ -108,7 +111,10 @@ where }, ); - sc_rpc::utils::spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); + sc_rpc::utils::spawn_subscription_task( + &self.executor, + PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()), + ); } async fn prove_finality( diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs index 6199e8a97d99..a618b7ff07ad 100644 --- a/substrate/client/consensus/grandpa/src/environment.rs +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -1214,14 +1214,20 @@ where .header(target_hash)? .expect("Header known to exist after `finality_target` call; qed"), Err(err) => { - warn!( + debug!( target: LOG_TARGET, "Encountered error finding best chain containing {:?}: couldn't find target block: {}", block, err, ); - return Ok(None) + // NOTE: in case the given `SelectChain` doesn't provide any block we fallback to using + // the given base block provided by the GRANDPA voter. + // + // For example, `LongestChain` will error if the given block to use as base isn't part + // of the best chain (as defined by `LongestChain`), which could happen if there was a + // re-org. 
+ base_header.clone() }, }; diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 14708cc89e89..2aa1b5f6ee1b 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -1820,6 +1820,116 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ ); } +// This is a regression test for an issue that was triggered by a reorg +// - https://github.com/paritytech/polkadot-sdk/issues/3487 +// - https://github.com/humanode-network/humanode/issues/1104 +#[tokio::test] +async fn grandpa_environment_uses_round_base_block_for_voting_if_finality_target_errors() { + use finality_grandpa::voter::Environment; + use sp_consensus::SelectChain; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1, 0); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); + let link = peer.data.lock().take().unwrap(); + let client = peer.client().as_client().clone(); + let select_chain = sc_consensus::LongestChain::new(peer.client().as_backend()); + + // create a chain that is 10 blocks long + peer.push_blocks(10, false); + + let env = test_environment_with_select_chain( + &link, + None, + network_service.clone(), + sync_service, + notification_service, + select_chain.clone(), + VotingRulesBuilder::default().build(), + ); + + let hashof7 = client.expect_block_hash_from_id(&BlockId::Number(7)).unwrap(); + let hashof8_a = client.expect_block_hash_from_id(&BlockId::Number(8)).unwrap(); + + // finalize the 7th block + peer.client().finalize_block(hashof7, None, false).unwrap(); + + assert_eq!(peer.client().info().finalized_hash, hashof7); + + // simulate completed grandpa round + env.completed( + 1, + finality_grandpa::round::State { + prevote_ghost: Some((hashof8_a, 8)), + finalized: Some((hashof7, 7)), + estimate: Some((hashof8_a, 8)), + completable: true, + }, + Default::default(), + &finality_grandpa::HistoricalVotes::new(), + ) + .unwrap(); + + // check simulated last completed round + assert_eq!( + env.voter_set_state.read().last_completed_round().state, + finality_grandpa::round::State { + prevote_ghost: Some((hashof8_a, 8)), + finalized: Some((hashof7, 7)), + estimate: Some((hashof8_a, 8)), + completable: true + } + ); + + // `hashof8_a` should be finalized next, `best_chain_containing` should return `hashof8_a` + assert_eq!(env.best_chain_containing(hashof8_a).await.unwrap().unwrap().0, hashof8_a); + + // simulate reorg on block 8 by creating a fork starting at block 7 that is 10 blocks long + peer.generate_blocks_at( + BlockId::Number(7), + 10, + BlockOrigin::File, + |mut builder| { + builder.push_deposit_log_digest_item(DigestItem::Other(vec![1])).unwrap(); + builder.build().unwrap().block + }, + false, + false, + true, + ForkChoiceStrategy::LongestChain, + ); + + // check that new best chain is on longest chain + assert_eq!(env.select_chain.best_chain().await.unwrap().number, 17); + + // verify that last completed round has `prevote_ghost` and `estimate` blocks related to + // `hashof8_a` + assert_eq!( + env.voter_set_state.read().last_completed_round().state, + finality_grandpa::round::State { + prevote_ghost: Some((hashof8_a, 8)), + finalized: Some((hashof7, 7)), + estimate: Some((hashof8_a, 8)), + 
completable: true + } + ); + + // `hashof8_a` should be finalized next, `best_chain_containing` should still return `hashof8_a` + assert_eq!(env.best_chain_containing(hashof8_a).await.unwrap().unwrap().0, hashof8_a); + + // simulate finalization of the `hashof8_a` block + peer.client().finalize_block(hashof8_a, None, false).unwrap(); + + // check that best chain is reorged back + assert_eq!(env.select_chain.best_chain().await.unwrap().number, 10); +} + #[tokio::test] async fn grandpa_environment_never_overwrites_round_voter_state() { use finality_grandpa::voter::Environment; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a056b4d437c8..1bc5cecb205b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -37,14 +37,14 @@ use codec::Encode; use futures::{channel::oneshot, future::FutureExt}; use jsonrpsee::{ core::async_trait, server::ResponsePayload, types::SubscriptionId, ConnectionId, Extensions, - MethodResponseFuture, PendingSubscriptionSink, SubscriptionSink, + MethodResponseFuture, PendingSubscriptionSink, }; use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, StorageProvider, }; -use sc_rpc::utils::to_sub_message; +use sc_rpc::utils::Subscription; use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::{traits::CallContext, Bytes}; @@ -158,7 +158,7 @@ impl, Block: BlockT, Client> ChainHead { } /// Helper to convert the `subscription ID` to a string. -pub fn read_subscription_id_as_string(sink: &SubscriptionSink) -> String { +pub fn read_subscription_id_as_string(sink: &Subscription) -> String { match sink.subscription_id() { SubscriptionId::Num(n) => n.to_string(), SubscriptionId::Str(s) => s.into_owned().into(), @@ -213,7 +213,7 @@ where return }; - let Ok(sink) = pending.accept().await else { return }; + let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; let sub_id = read_subscription_id_as_string(&sink); // Keep track of the subscription. @@ -223,8 +223,7 @@ where // Inserting the subscription can only fail if the JsonRPSee generated a duplicate // subscription ID. 
debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription already accepted", sub_id); - let msg = to_sub_message(&sink, &FollowEvent::::Stop); - let _ = sink.send(msg).await; + let _ = sink.send(&FollowEvent::::Stop).await; return }; debug!(target: LOG_TARGET, "[follow][id={:?}] Subscription accepted", sub_id); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs index 6dc3df76bdd7..1d28d2071248 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs @@ -31,12 +31,11 @@ use futures::{ stream::{self, Stream, StreamExt}, }; use futures_util::future::Either; -use jsonrpsee::SubscriptionSink; use log::debug; use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, }; -use sc_rpc::utils::to_sub_message; +use sc_rpc::utils::Subscription; use schnellru::{ByLength, LruMap}; use sp_api::CallApiAt; use sp_blockchain::{ @@ -597,7 +596,7 @@ where &mut self, startup_point: &StartupPoint, mut stream: EventStream, - sink: SubscriptionSink, + sink: Subscription, rx_stop: oneshot::Receiver<()>, ) -> Result<(), SubscriptionManagementError> where @@ -632,23 +631,20 @@ where self.sub_id, err ); - let msg = to_sub_message(&sink, &FollowEvent::::Stop); - let _ = sink.send(msg).await; + _ = sink.send(&FollowEvent::::Stop).await; return Err(err) }, }; for event in events { - let msg = to_sub_message(&sink, &event); - if let Err(err) = sink.send(msg).await { + if let Err(err) = sink.send(&event).await { // Failed to submit event. debug!( target: LOG_TARGET, "[follow][id={:?}] Failed to send event {:?}", self.sub_id, err ); - let msg = to_sub_message(&sink, &FollowEvent::::Stop); - let _ = sink.send(msg).await; + let _ = sink.send(&FollowEvent::::Stop).await; // No need to propagate this error further, the client disconnected. return Ok(()) } @@ -662,15 +658,14 @@ where // - the substrate streams have closed // - the `Stop` receiver was triggered internally (cannot hold the pinned block guarantee) // - the client disconnected. - let msg = to_sub_message(&sink, &FollowEvent::::Stop); - let _ = sink.send(msg).await; + let _ = sink.send(&FollowEvent::::Stop).await; Ok(()) } /// Generate the block events for the `chainHead_follow` method. pub async fn generate_events( &mut self, - sink: SubscriptionSink, + sink: Subscription, sub_data: InsertedSubscriptionData, ) -> Result<(), SubscriptionManagementError> { // Register for the new block and finalized notifications. 
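The recurring change in this file and in `chain_head.rs` is that serialization moves behind the new `Subscription` wrapper, so call sites send typed events directly instead of building a `SubscriptionMessage` by hand. A dependency-free sketch of that shape, with `RawSink` standing in for `jsonrpsee::SubscriptionSink` and `Debug` formatting standing in for real serialization (both names are illustrative only):

```rust
// Hypothetical raw sink: like the jsonrpsee sink, it can only transport
// already-encoded messages.
struct RawSink { method: &'static str }

impl RawSink {
    fn send(&self, encoded: String) -> Result<(), ()> {
        println!("--> {}: {}", self.method, encoded);
        Ok(())
    }
}

// The wrapper owns the raw sink and folds the encoding step into `send`,
// which is the shape of the `Subscription` type added by this patch.
struct Subscription(RawSink);

impl Subscription {
    fn send<T: std::fmt::Debug>(&self, event: &T) -> Result<(), ()> {
        // The real wrapper serializes via `SubscriptionMessage::new`; Debug
        // formatting stands in for that here to stay dependency-free.
        self.0.send(format!("{event:?}"))
    }
}

fn main() {
    let sink = Subscription(RawSink { method: "chainHead_follow" });
    // Call sites shrink from "build message, then send" to a single call:
    let _ = sink.send(&"stop");
}
```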
@@ -698,8 +693,7 @@ where self.sub_id, err ); - let msg = to_sub_message(&sink, &FollowEvent::::Stop); - let _ = sink.send(msg).await; + let _ = sink.send(&FollowEvent::::Stop).await; return Err(err) }, }; diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs index 7ce85b9feafe..53c5b8ce3895 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs @@ -25,7 +25,7 @@ use codec::Encode; use jsonrpsee::rpc_params; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_core::H256; -use std::sync::Arc; +use std::{sync::Arc, vec}; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; @@ -149,3 +149,89 @@ async fn tx_with_pruned_best_block() { let event: TransactionEvent = get_next_event_sub!(&mut sub); assert_eq!(event, TransactionEvent::Finalized(TransactionBlock { hash: block_2, index: 0 })); } + +#[tokio::test] +async fn tx_slow_client_replace_old_messages() { + let (api, pool, client, tx_api, _exec_middleware, _pool_middleware) = setup_api_tx(); + let block_1_header = api.push_block(1, vec![], true); + client.set_best_block(block_1_header.hash(), 1); + + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + // The subscription itself has a buffer of length 1 and no way to create + // it without a buffer. + // + // Then `transactionWatch` has its own buffer of length 3 which leads to + // that it's limited to 5 items in the tests. + // + // 1. Send will complete immediately + // 2. Send will be pending in the subscription sink (not possible to cancel) + // 3. The rest of messages will be kept in a RingBuffer and older messages are replaced by newer + // items. + let mut sub = tx_api + .subscribe("transactionWatch_v1_submitAndWatch", rpc_params![&xt], 1) + .await + .unwrap(); + + // Import block 2 with the transaction included. + let block = api.push_block(2, vec![uxt.clone()], true); + let block_hash = block.hash(); + let event = ChainEvent::NewBestBlock { hash: block_hash, tree_route: None }; + pool.inner_pool.maintain(event).await; + + let mut block2_hash = None; + + // Import block 2 again without the transaction included. + for _ in 0..10 { + let block_not_imported = api.push_block(2, vec![], true); + let event = ChainEvent::NewBestBlock { hash: block_not_imported.hash(), tree_route: None }; + pool.inner_pool.maintain(event).await; + + let block2 = api.push_block(2, vec![uxt.clone()], true); + block2_hash = Some(block2.hash()); + let event = ChainEvent::NewBestBlock { hash: block2.hash(), tree_route: None }; + + pool.inner_pool.maintain(event).await; + } + + let block2_hash = block2_hash.unwrap(); + + // Finalize the transaction + let event = ChainEvent::Finalized { hash: block2_hash, tree_route: Arc::from(vec![]) }; + pool.inner_pool.maintain(event).await; + + // Hack to mimic a slow client. + tokio::time::sleep(std::time::Duration::from_secs(10)).await; + + // Read the events. + let mut res: Vec> = Vec::new(); + + while let Some(item) = tokio::time::timeout(std::time::Duration::from_secs(5), sub.next()) + .await + .unwrap() + { + let (ev, _) = item.unwrap(); + res.push(ev); + } + + // BestBlockIncluded(None) is dropped and not seen. 
+ let exp = vec![ + // First message + TransactionEvent::Validated, + // Second message + TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash: block_hash, + index: 0, + })), + // Most recent 3 messages. + TransactionEvent::Validated, + TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash: block2_hash, + index: 0, + })), + TransactionEvent::Finalized(TransactionBlock { hash: block2_hash, index: 0 }), + ]; + + assert_eq!(res, exp); +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index 723440d1b111..ac24ce960f61 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -30,7 +30,7 @@ use crate::{ use codec::Decode; use futures::{StreamExt, TryFutureExt}; use jsonrpsee::{core::async_trait, PendingSubscriptionSink}; -use sc_rpc::utils::{pipe_from_stream, to_sub_message}; +use sc_rpc::utils::{RingBuffer, Subscription}; use sc_transaction_pool_api::{ error::IntoPoolError, BlockHash, TransactionFor, TransactionPool, TransactionSource, TransactionStatus, @@ -84,16 +84,14 @@ where Err(e) => { log::debug!(target: LOG_TARGET, "Extrinsic bytes cannot be decoded: {:?}", e); - let Ok(sink) = pending.accept().await else { return }; + let Ok(sink) = pending.accept().await.map(Subscription::from) else { return }; // The transaction is invalid. - let msg = to_sub_message( - &sink, - &TransactionEvent::Invalid::>(TransactionError { + let _ = sink + .send(&TransactionEvent::Invalid::>(TransactionError { error: "Extrinsic bytes cannot be decoded".into(), - }), - ); - let _ = sink.send(msg).await; + })) + .await; return }, }; @@ -108,16 +106,23 @@ where .unwrap_or_else(|e| Error::Verification(Box::new(e))) }); + let Ok(sink) = pending.accept().await.map(Subscription::from) else { + return; + }; + match submit.await { Ok(stream) => { - let stream = stream.filter_map(move |event| async move { handle_event(event) }); - pipe_from_stream(pending, stream.boxed()).await; + let stream = + stream.filter_map(move |event| async move { handle_event(event) }).boxed(); + + // If the subscription is too slow older events will be overwritten. + sink.pipe_from_stream(stream, RingBuffer::new(3)).await; }, Err(err) => { // We have not created an `Watcher` for the tx. Make sure the // error is still propagated as an event. 
let event: TransactionEvent<::Hash> = err.into(); - pipe_from_stream(pending, futures::stream::once(async { event }).boxed()).await; + _ = sink.send(&event).await; }, }; }; diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index 975f66406a6a..2fc21a238bc9 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -24,7 +24,7 @@ mod tests; use std::sync::Arc; use crate::{ - utils::{pipe_from_stream, spawn_subscription_task}, + utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription}, SubscriptionTaskExecutor, }; @@ -202,7 +202,9 @@ where }, }; - pipe_from_stream(pending, stream).await; + PendingSubscription::from(pending) + .pipe_from_stream(stream, BoundedVecDeque::default()) + .await; }; spawn_subscription_task(&self.executor, fut); diff --git a/substrate/client/rpc/src/chain/chain_full.rs b/substrate/client/rpc/src/chain/chain_full.rs index 515c0f62c8ad..139c18b884ac 100644 --- a/substrate/client/rpc/src/chain/chain_full.rs +++ b/substrate/client/rpc/src/chain/chain_full.rs @@ -20,7 +20,7 @@ use super::{client_err, ChainBackend, Error}; use crate::{ - utils::{pipe_from_stream, spawn_subscription_task}, + utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription}, SubscriptionTaskExecutor, }; use std::{marker::PhantomData, sync::Arc}; @@ -142,6 +142,8 @@ fn subscribe_headers( // we set up the stream and chain it to the stream. Consuming code would need to handle // duplicates at the beginning of the stream though. let stream = stream::iter(maybe_header).chain(stream()); - - spawn_subscription_task(executor, pipe_from_stream(pending, stream)); + spawn_subscription_task( + executor, + PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()), + ); } diff --git a/substrate/client/rpc/src/state/state_full.rs b/substrate/client/rpc/src/state/state_full.rs index bda678c1b45e..7703936f8115 100644 --- a/substrate/client/rpc/src/state/state_full.rs +++ b/substrate/client/rpc/src/state/state_full.rs @@ -26,7 +26,7 @@ use super::{ ChildStateBackend, StateBackend, }; use crate::{ - utils::{pipe_from_stream, spawn_subscription_task}, + utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription}, DenyUnsafe, SubscriptionTaskExecutor, }; @@ -405,7 +405,10 @@ where }); let stream = futures::stream::once(future::ready(initial)).chain(version_stream); - spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); + spawn_subscription_task( + &self.executor, + PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()), + ); } fn subscribe_storage( @@ -457,7 +460,10 @@ where .chain(storage_stream) .filter(|storage| future::ready(!storage.changes.is_empty())); - spawn_subscription_task(&self.executor, pipe_from_stream(pending, stream)); + spawn_subscription_task( + &self.executor, + PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()), + ); } fn trace_block( diff --git a/substrate/client/rpc/src/utils.rs b/substrate/client/rpc/src/utils.rs index bc566ed37f23..e2ff04c0baf3 100644 --- a/substrate/client/rpc/src/utils.rs +++ b/substrate/client/rpc/src/utils.rs @@ -23,25 +23,52 @@ use futures::{ future::{self, Either, Fuse, FusedFuture}, Future, FutureExt, Stream, StreamExt, }; -use jsonrpsee::{PendingSubscriptionSink, SubscriptionMessage, SubscriptionSink}; +use jsonrpsee::{ + types::SubscriptionId, DisconnectError, PendingSubscriptionSink, SubscriptionMessage, + SubscriptionSink, +}; 
use sp_runtime::Serialize; use std::collections::VecDeque; const DEFAULT_BUF_SIZE: usize = 16; -/// A simple bounded VecDeque. -struct BoundedVecDeque { +/// A trait representing a buffer which may or may not support +/// to replace items when the buffer is full. +pub trait Buffer { + /// The item type that the buffer holds. + type Item; + + /// Push an item to the buffer. + /// + /// Returns `Err` if the buffer doesn't support replacing older items + fn push(&mut self, item: Self::Item) -> Result<(), ()>; + /// Pop the next item from the buffer. + fn pop(&mut self) -> Option; +} + +/// A simple bounded buffer that will terminate the subscription if the buffer becomes full. +pub struct BoundedVecDeque { inner: VecDeque, max_cap: usize, } +impl Default for BoundedVecDeque { + fn default() -> Self { + Self { inner: VecDeque::with_capacity(DEFAULT_BUF_SIZE), max_cap: DEFAULT_BUF_SIZE } + } +} + impl BoundedVecDeque { /// Create a new bounded VecDeque. - fn new() -> Self { - Self { inner: VecDeque::with_capacity(DEFAULT_BUF_SIZE), max_cap: DEFAULT_BUF_SIZE } + pub fn new(cap: usize) -> Self { + Self { inner: VecDeque::with_capacity(cap), max_cap: cap } } +} + +impl Buffer for BoundedVecDeque { + type Item = T; - fn push_back(&mut self, item: T) -> Result<(), ()> { + fn push(&mut self, item: Self::Item) -> Result<(), ()> { if self.inner.len() >= self.max_cap { Err(()) } else { @@ -50,126 +77,187 @@ impl BoundedVecDeque { } } - fn pop_front(&mut self) -> Option { + fn pop(&mut self) -> Option { self.inner.pop_front() } } -/// Feed items to the subscription from the underlying stream. -/// -/// This is bounded because the underlying streams in substrate are -/// unbounded and if the subscription can't keep with stream it can -/// cause the buffer to become very large and consume lots of memory. -/// -/// In such cases the subscription is dropped. -pub async fn pipe_from_stream(pending: PendingSubscriptionSink, mut stream: S) -where - S: Stream + Unpin + Send + 'static, - T: Serialize + Send + 'static, -{ - let mut buf = BoundedVecDeque::new(); - let accept_fut = pending.accept(); - - futures::pin_mut!(accept_fut); - - // Poll the stream while waiting for the subscription to be accepted - // - // If the `max_cap` is exceeded then the subscription is dropped. - let sink = loop { - match future::select(accept_fut, stream.next()).await { - Either::Left((Ok(sink), _)) => break sink, - Either::Right((Some(msg), f)) => { - if buf.push_back(msg).is_err() { - log::warn!(target: "rpc", "Subscription::accept failed buffer limit={} exceeded; dropping subscription", buf.max_cap); - return - } - accept_fut = f; - }, - // The connection was closed or the stream was closed. - _ => return, +/// Fixed size ring buffer that replaces the oldest item when full. +#[derive(Debug)] +pub struct RingBuffer { + inner: VecDeque, + cap: usize, +} + +impl RingBuffer { + /// Create a new ring buffer. 
+ pub fn new(cap: usize) -> Self { + Self { inner: VecDeque::with_capacity(cap), cap } + } +} + +impl Buffer for RingBuffer { + type Item = T; + + fn push(&mut self, item: T) -> Result<(), ()> { + if self.inner.len() >= self.cap { + self.inner.pop_front(); } - }; - inner_pipe_from_stream(sink, stream, buf).await + self.inner.push_back(item); + + Ok(()) + } + + fn pop(&mut self) -> Option { + self.inner.pop_front() + } } -async fn inner_pipe_from_stream( - sink: SubscriptionSink, - mut stream: S, - mut buf: BoundedVecDeque, -) where - S: Stream + Unpin + Send + 'static, - T: Serialize + Send + 'static, -{ - let mut next_fut = Box::pin(Fuse::terminated()); - let mut next_item = stream.next(); - let closed = sink.closed(); - - futures::pin_mut!(closed); - - loop { - if next_fut.is_terminated() { - if let Some(v) = buf.pop_front() { - let val = to_sub_message(&sink, &v); - next_fut.set(async { sink.send(val).await }.fuse()); +/// A pending subscription. +pub struct PendingSubscription(PendingSubscriptionSink); + +impl From for PendingSubscription { + fn from(p: PendingSubscriptionSink) -> Self { + Self(p) + } +} + +impl PendingSubscription { + /// Feed items to the subscription from the underlying stream + /// with specified buffer strategy. + pub async fn pipe_from_stream(self, mut stream: S, mut buf: B) + where + S: Stream + Unpin + Send + 'static, + T: Serialize + Send + 'static, + B: Buffer, + { + let method = self.0.method_name().to_string(); + let conn_id = self.0.connection_id().0; + let accept_fut = self.0.accept(); + + futures::pin_mut!(accept_fut); + + // Poll the stream while waiting for the subscription to be accepted + // + // If the `max_cap` is exceeded then the subscription is dropped. + let sink = loop { + match future::select(accept_fut, stream.next()).await { + Either::Left((Ok(sink), _)) => break sink, + Either::Right((Some(msg), f)) => { + if buf.push(msg).is_err() { + log::debug!(target: "rpc", "Subscription::accept buffer full for subscription={method} conn_id={conn_id}; dropping subscription"); + return + } + accept_fut = f; + }, + // The connection was closed or the stream was closed. + _ => return, } - } + }; - match future::select(closed, future::select(next_fut, next_item)).await { - // Send operation finished. - Either::Right((Either::Left((_, n)), c)) => { - next_item = n; - closed = c; - next_fut = Box::pin(Fuse::terminated()); - }, - // New item from the stream - Either::Right((Either::Right((Some(v), n)), c)) => { - if buf.push_back(v).is_err() { - log::warn!( - target: "rpc", - "Subscription buffer limit={} exceeded for subscription={} conn_id={}; dropping subscription", - buf.max_cap, - sink.method_name(), - sink.connection_id().0 - ); - return - } + Subscription(sink).pipe_from_stream(stream, buf).await + } +} - next_fut = n; - closed = c; - next_item = stream.next(); - }, - // Stream "finished". - // - // Process remaining items and terminate. - Either::Right((Either::Right((None, pending_fut)), _)) => { - if !pending_fut.is_terminated() && pending_fut.await.is_err() { - return; +/// An active subscription. +#[derive(Clone, Debug)] +pub struct Subscription(SubscriptionSink); + +impl From for Subscription { + fn from(sink: SubscriptionSink) -> Self { + Self(sink) + } +} + +impl Subscription { + /// Feed items to the subscription from the underlying stream + /// with specified buffer strategy. 
+ pub async fn pipe_from_stream(self, mut stream: S, mut buf: B) + where + S: Stream + Unpin + Send + 'static, + T: Serialize + Send + 'static, + B: Buffer, + { + let mut next_fut = Box::pin(Fuse::terminated()); + let mut next_item = stream.next(); + let closed = self.0.closed(); + + futures::pin_mut!(closed); + + loop { + if next_fut.is_terminated() { + if let Some(v) = buf.pop() { + let val = self.to_sub_message(&v); + next_fut.set(async { self.0.send(val).await }.fuse()); } + } + + match future::select(closed, future::select(next_fut, next_item)).await { + // Send operation finished. + Either::Right((Either::Left((_, n)), c)) => { + next_item = n; + closed = c; + next_fut = Box::pin(Fuse::terminated()); + }, + // New item from the stream + Either::Right((Either::Right((Some(v), n)), c)) => { + if buf.push(v).is_err() { + log::debug!( + target: "rpc", + "Subscription buffer full for subscription={} conn_id={}; dropping subscription", + self.0.method_name(), + self.0.connection_id().0 + ); + return + } - while let Some(v) = buf.pop_front() { - let val = to_sub_message(&sink, &v); - if sink.send(val).await.is_err() { + next_fut = n; + closed = c; + next_item = stream.next(); + }, + // Stream "finished". + // + // Process remaining items and terminate. + Either::Right((Either::Right((None, pending_fut)), _)) => { + if !pending_fut.is_terminated() && pending_fut.await.is_err() { return; } - } - return; - }, - // Subscription was closed. - Either::Left(_) => return, + while let Some(v) = buf.pop() { + if self.send(&v).await.is_err() { + return; + } + } + + return; + }, + // Subscription was closed. + Either::Left(_) => return, + } } } -} -/// Builds a subscription message. -/// -/// # Panics -/// -/// This function panics `Serialize` fails and it is treated a bug. -pub fn to_sub_message(sink: &SubscriptionSink, result: &impl Serialize) -> SubscriptionMessage { - SubscriptionMessage::new(sink.method_name(), sink.subscription_id(), result) - .expect("Serialize infallible; qed") + /// Send a message on the subscription. + pub async fn send(&self, result: &impl Serialize) -> Result<(), DisconnectError> { + self.0.send(self.to_sub_message(result)).await + } + + /// Get the subscription id. + pub fn subscription_id(&self) -> SubscriptionId { + self.0.subscription_id() + } + + /// Completes when the subscription is closed. + pub async fn closed(&self) { + self.0.closed().await + } + + /// Convert a result to a subscription message. + fn to_sub_message(&self, result: &impl Serialize) -> SubscriptionMessage { + SubscriptionMessage::new(self.0.method_name(), self.0.subscription_id(), result) + .expect("Serialize infallible; qed") + } } /// Helper for spawning non-blocking rpc subscription task. 
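The two buffer strategies introduced in `utils.rs` differ only in what happens when the buffer is full: `BoundedVecDeque` rejects the push, which makes the caller drop the subscription, while `RingBuffer` evicts the oldest item and accepts the new one. A condensed, dependency-free sketch of just that behaviour (the real types additionally implement the shared `Buffer` trait shown above):

```rust
use std::collections::VecDeque;

// Bounded strategy: refuse new items once full; the subscription code treats
// this as "the client is too slow" and terminates the subscription.
fn bounded_push<T>(buf: &mut VecDeque<T>, cap: usize, item: T) -> Result<(), ()> {
    if buf.len() >= cap {
        Err(())
    } else {
        buf.push_back(item);
        Ok(())
    }
}

// Ring strategy: always accept, evicting the oldest item instead; this is what
// `transactionWatch_v1_submitAndWatch` now uses with a capacity of 3.
fn ring_push<T>(buf: &mut VecDeque<T>, cap: usize, item: T) {
    if buf.len() >= cap {
        buf.pop_front();
    }
    buf.push_back(item);
}

fn main() {
    let mut bounded = VecDeque::new();
    assert!(bounded_push(&mut bounded, 2, 1).is_ok());
    assert!(bounded_push(&mut bounded, 2, 2).is_ok());
    assert!(bounded_push(&mut bounded, 2, 3).is_err()); // full: caller bails out

    let mut ring = VecDeque::new();
    for i in 0..5 {
        ring_push(&mut ring, 3, i);
    }
    assert_eq!(ring.iter().copied().collect::<Vec<_>>(), vec![2, 3, 4]); // oldest evicted
}
```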
@@ -182,7 +270,7 @@ pub fn spawn_subscription_task(
 
 #[cfg(test)]
 mod tests {
-	use super::pipe_from_stream;
+	use super::*;
 	use futures::StreamExt;
 	use jsonrpsee::{core::EmptyServerParams, RpcModule, Subscription};
 
@@ -191,7 +279,9 @@ mod tests {
 		module
 			.register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move {
 				let stream = futures::stream::iter([0; 16]);
-				pipe_from_stream(pending, stream).await;
+				PendingSubscription::from(pending)
+					.pipe_from_stream(stream, BoundedVecDeque::new(16))
+					.await;
 				Ok(())
 			})
 			.unwrap();
@@ -212,14 +302,16 @@ mod tests {
 	}
 
 	#[tokio::test]
-	async fn pipe_from_stream_is_bounded() {
+	async fn pipe_from_stream_with_bounded_vec() {
 		let (tx, mut rx) = futures::channel::mpsc::unbounded::<()>();
 
 		let mut module = RpcModule::new(tx);
 		module
 			.register_subscription("sub", "my_sub", "unsub", |_, pending, ctx, _| async move {
 				let stream = futures::stream::iter([0; 32]);
-				pipe_from_stream(pending, stream).await;
+				PendingSubscription::from(pending)
+					.pipe_from_stream(stream, BoundedVecDeque::new(16))
+					.await;
 				_ = ctx.unbounded_send(());
 				Ok(())
 			})
@@ -248,7 +340,9 @@ mod tests {
 			// to sync buffer and channel send operations
 			let stream = futures::stream::empty::<()>();
 			// this should exit immediately
-			pipe_from_stream(pending, stream).await;
+			PendingSubscription::from(pending)
+				.pipe_from_stream(stream, BoundedVecDeque::default())
+				.await;
 			// notify that the `pipe_from_stream` has returned
 			notify_tx.notify_one();
 			Ok(())
@@ -260,4 +354,35 @@ mod tests {
 		// it should fire once `pipe_from_stream` returns
 		notify_rx.notified().await;
 	}
+
+	#[tokio::test]
+	async fn subscription_replace_old_messages() {
+		let mut module = RpcModule::new(());
+		module
+			.register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move {
+				// Send items 0..20 and ensure that only the last 3 are kept in the buffer.
+				let stream = futures::stream::iter(0..20);
+				PendingSubscription::from(pending)
+					.pipe_from_stream(stream, RingBuffer::new(3))
+					.await;
+				Ok(())
+			})
+			.unwrap();
+
+		let mut sub = module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap();
+
+		// This is a hack to simulate a very slow client,
+		// so that all older messages are replaced.
+		tokio::time::sleep(std::time::Duration::from_secs(10)).await;
+
+		let mut res = Vec::new();
+
+		while let Some(Ok((v, _))) = sub.next::<usize>().await {
+			res.push(v);
+		}
+
+		// There is no way to cancel pending send operations so
+		// that's why 0 is included here.
+		assert_eq!(res, vec![0, 17, 18, 19]);
+	}
 }
diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs
index 620e6544b08f..669279f12b27 100644
--- a/substrate/frame/contracts/src/benchmarking/mod.rs
+++ b/substrate/frame/contracts/src/benchmarking/mod.rs
@@ -41,6 +41,7 @@ use frame_benchmarking::v2::*;
 use frame_support::{
 	self, assert_ok,
 	pallet_prelude::StorageVersion,
+	storage::child,
 	traits::{fungible::InspectHold, Currency},
 	weights::{Weight, WeightMeter},
 };
@@ -63,6 +64,9 @@ const API_BENCHMARK_RUNS: u32 = 1600;
 /// benchmarks are faster.
 const INSTR_BENCHMARK_RUNS: u32 = 5000;
 
+/// Number of layers in a Radix16 unbalanced trie.
+const UNBALANCED_TRIE_LAYERS: u32 = 20;
+
 /// An instantiated and deployed contract.
 #[derive(Clone)]
 struct Contract<T: Config> {
@@ -152,6 +156,36 @@ where
 	Ok(())
 }
 
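Editor's note: the helper added below builds a deliberately "full", unbalanced child trie around the benchmarked key. Each layer rewrites a single nibble of the key, so siblings are inserted that diverge from the key at successive nibble positions and force branching all along its path. A standalone model of the key enumeration (illustrative only; `unbalanced_keys` is a hypothetical name and nothing here touches a real trie):

// Model of the key set written by `with_unbalanced_storage_trie` below.
fn unbalanced_keys(key: &[u8], layers: u32) -> Vec<Vec<u8>> {
	let mut keys = vec![key.to_vec()]; // the benchmarked key itself
	for l in 0..layers {
		let pos = l as usize / 2;
		let mut key_new = key.to_vec();
		for i in 0u8..16 {
			// Even layers vary the low nibble of byte `pos`, odd layers the high nibble.
			key_new[pos] = if l % 2 == 0 {
				(key_new[pos] & 0xF0) | i
			} else {
				(key_new[pos] & 0x0F) | (i << 4)
			};
			// Skip the one value that reproduces the original key.
			if key_new.as_slice() != key {
				keys.push(key_new.clone());
			}
		}
	}
	keys
}

fn main() {
	// 20 layers need a key of at least 10 bytes, matching the length check in the helper below.
	let key = vec![0u8; 10];
	let keys = unbalanced_keys(&key, 20);
	// 15 distinct siblings per layer plus the key itself.
	assert_eq!(keys.len(), 20 * 15 + 1);
}
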
+	/// Create a new contract with the specified unbalanced storage trie.
+	fn with_unbalanced_storage_trie(code: WasmModule<T>, key: &[u8]) -> Result<Self, &'static str> {
+		if (key.len() as u32) < (UNBALANCED_TRIE_LAYERS + 1) / 2 {
+			return Err("Key size too small to create the specified trie");
+		}
+
+		let value = vec![16u8; T::Schedule::get().limits.payload_len as usize];
+		let contract = Contract::<T>::new(code, vec![])?;
+		let info = contract.info()?;
+		let child_trie_info = info.child_trie_info();
+		child::put_raw(&child_trie_info, &key, &value);
+		for l in 0..UNBALANCED_TRIE_LAYERS {
+			let pos = l as usize / 2;
+			let mut key_new = key.to_vec();
+			for i in 0u8..16 {
+				key_new[pos] = if l % 2 == 0 {
+					(key_new[pos] & 0xF0) | i
+				} else {
+					(key_new[pos] & 0x0F) | (i << 4)
+				};
+
+				if key == &key_new {
+					continue
+				}
+				child::put_raw(&child_trie_info, &key_new, &value);
+			}
+		}
+		Ok(contract)
+	}
+
 	/// Get the `ContractInfo` of the `addr` or an error if it no longer exists.
 	fn address_info(addr: &T::AccountId) -> Result<ContractInfo<T>, &'static str> {
 		ContractInfoOf::<T>::get(addr).ok_or("Expected contract to exist at this point.")
@@ -1014,6 +1048,102 @@ mod benchmarks {
 		assert_eq!(setup.debug_message().unwrap().len() as u32, i);
 	}
 
+	#[benchmark(skip_meta, pov_mode = Measured)]
+	fn get_storage_empty() -> Result<(), BenchmarkError> {
+		let max_key_len = T::MaxStorageKeyLen::get();
+		let key = vec![0u8; max_key_len as usize];
+		let max_value_len = T::Schedule::get().limits.payload_len as usize;
+		let value = vec![1u8; max_value_len];
+
+		let instance = Contract::<T>::new(WasmModule::dummy(), vec![])?;
+		let info = instance.info()?;
+		let child_trie_info = info.child_trie_info();
+		info.bench_write_raw(&key, Some(value.clone()), false)
+			.map_err(|_| "Failed to write to storage during setup.")?;
+
+		let result;
+		#[block]
+		{
+			result = child::get_raw(&child_trie_info, &key);
+		}
+
+		assert_eq!(result, Some(value));
+		Ok(())
+	}
+
+	#[benchmark(skip_meta, pov_mode = Measured)]
+	fn get_storage_full() -> Result<(), BenchmarkError> {
+		let max_key_len = T::MaxStorageKeyLen::get();
+		let key = vec![0u8; max_key_len as usize];
+		let max_value_len = T::Schedule::get().limits.payload_len;
+		let value = vec![1u8; max_value_len as usize];
+
+		let instance = Contract::<T>::with_unbalanced_storage_trie(WasmModule::dummy(), &key)?;
+		let info = instance.info()?;
+		let child_trie_info = info.child_trie_info();
+		info.bench_write_raw(&key, Some(value.clone()), false)
+			.map_err(|_| "Failed to write to storage during setup.")?;
+
+		let result;
+		#[block]
+		{
+			result = child::get_raw(&child_trie_info, &key);
+		}
+
+		assert_eq!(result, Some(value));
+		Ok(())
+	}
+
+	#[benchmark(skip_meta, pov_mode = Measured)]
+	fn set_storage_empty() -> Result<(), BenchmarkError> {
+		let max_key_len = T::MaxStorageKeyLen::get();
+		let key = vec![0u8; max_key_len as usize];
+		let max_value_len = T::Schedule::get().limits.payload_len as usize;
+		let value = vec![1u8; max_value_len];
+
+		let instance = Contract::<T>::new(WasmModule::dummy(), vec![])?;
+		let info = instance.info()?;
+		let child_trie_info = info.child_trie_info();
+		info.bench_write_raw(&key, Some(vec![42u8; max_value_len]), false)
+			.map_err(|_| "Failed to write to storage during setup.")?;
+
+		let val = Some(value.clone());
+		let result;
+		#[block]
+		{
+			result = info.bench_write_raw(&key, val, true);
+		}
+
+		assert_ok!(result);
+		assert_eq!(child::get_raw(&child_trie_info, &key).unwrap(), value);
+		Ok(())
+	}
+
+	#[benchmark(skip_meta, pov_mode = Measured)]
+	fn set_storage_full() -> Result<(), BenchmarkError> {
+		let max_key_len = T::MaxStorageKeyLen::get();
+		let key = 
vec![0u8; max_key_len as usize]; + let max_value_len = T::Schedule::get().limits.payload_len; + let value = vec![1u8; max_value_len as usize]; + + let instance = Contract::::with_unbalanced_storage_trie(WasmModule::dummy(), &key)?; + let info = instance.info()?; + let child_trie_info = info.child_trie_info(); + info.bench_write_raw(&key, Some(vec![42u8; max_value_len as usize]), false) + .map_err(|_| "Failed to write to storage during setup.")?; + + let val = Some(value.clone()); + let result; + #[block] + { + result = info.bench_write_raw(&key, val, true); + } + + assert_ok!(result); + assert_eq!(child::get_raw(&child_trie_info, &key).unwrap(), value); + Ok(()) + } + // n: new byte size // o: old byte size #[benchmark(skip_meta, pov_mode = Measured)] diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 65e7129cdf84..c0201266c816 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -164,13 +164,35 @@ impl ContractInfo { storage_meter: Option<&mut meter::NestedMeter>, take: bool, ) -> Result { - let child_trie_info = &self.child_trie_info(); let hashed_key = key.hash(); + self.write_raw(&hashed_key, new_value, storage_meter, take) + } + + /// Update a storage entry into a contract's kv storage. + /// Function used in benchmarks, which can simulate prefix collision in keys. + #[cfg(feature = "runtime-benchmarks")] + pub fn bench_write_raw( + &self, + key: &[u8], + new_value: Option>, + take: bool, + ) -> Result { + self.write_raw(key, new_value, None, take) + } + + fn write_raw( + &self, + key: &[u8], + new_value: Option>, + storage_meter: Option<&mut meter::NestedMeter>, + take: bool, + ) -> Result { + let child_trie_info = &self.child_trie_info(); let (old_len, old_value) = if take { - let val = child::get_raw(child_trie_info, &hashed_key); + let val = child::get_raw(child_trie_info, key); (val.as_ref().map(|v| v.len() as u32), val) } else { - (child::len(child_trie_info, &hashed_key), None) + (child::len(child_trie_info, key), None) }; if let Some(storage_meter) = storage_meter { @@ -196,8 +218,8 @@ impl ContractInfo { } match &new_value { - Some(new_value) => child::put_raw(child_trie_info, &hashed_key, new_value), - None => child::kill(child_trie_info, &hashed_key), + Some(new_value) => child::put_raw(child_trie_info, key, new_value), + None => child::kill(child_trie_info, key), } Ok(match (old_len, old_value) { diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 982d28540ec1..fee127f65852 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -255,31 +255,34 @@ pub enum RuntimeCosts { UnlockDelegateDependency, } -// For the function that modifies the storage, the benchmarks are done with one item in the -// transient_storage (BTreeMap). To consider the worst-case scenario, the weight of the overhead of -// writing to a full BTreeMap should be included. On top of that, the rollback weight is added, -// which is the worst scenario. -macro_rules! 
cost_write { - // cost_write!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_add(T::WeightInfo::rollback_transient_storage()) - // .saturating_add(T::WeightInfo::set_transient_storage_full().saturating_sub(T::WeightInfo::set_transient_storage_empty()) - ($name:ident $(, $arg:expr )*) => { - (T::WeightInfo::$name($( $arg ),*).saturating_add(T::WeightInfo::rollback_transient_storage()).saturating_add(cost_write!(@cost_storage))) - }; +/// For functions that modify storage, benchmarks are performed with one item in the +/// storage. To account for the worst-case scenario, the weight of the overhead of +/// writing to or reading from full storage is included. For transient storage writes, +/// the rollback weight is added to reflect the worst-case scenario for this operation. +macro_rules! cost_storage { + (write_transient, $name:ident $(, $arg:expr )*) => { + T::WeightInfo::$name($( $arg ),*) + .saturating_add(T::WeightInfo::rollback_transient_storage()) + .saturating_add(T::WeightInfo::set_transient_storage_full() + .saturating_sub(T::WeightInfo::set_transient_storage_empty())) + }; - (@cost_storage) => { - T::WeightInfo::set_transient_storage_full().saturating_sub(T::WeightInfo::set_transient_storage_empty()) + (read_transient, $name:ident $(, $arg:expr )*) => { + T::WeightInfo::$name($( $arg ),*) + .saturating_add(T::WeightInfo::get_transient_storage_full() + .saturating_sub(T::WeightInfo::get_transient_storage_empty())) }; -} -macro_rules! cost_read { - // cost_read!(name, a, b, c) -> T::WeightInfo::name(a, b, c).saturating_add(T::WeightInfo::get_transient_storage_full() - // .saturating_sub(T::WeightInfo::get_transient_storage_empty()) - ($name:ident $(, $arg:expr )*) => { - (T::WeightInfo::$name($( $arg ),*).saturating_add(cost_read!(@cost_storage))) - }; + (write, $name:ident $(, $arg:expr )*) => { + T::WeightInfo::$name($( $arg ),*) + .saturating_add(T::WeightInfo::set_storage_full() + .saturating_sub(T::WeightInfo::set_storage_empty())) + }; - (@cost_storage) => { - T::WeightInfo::get_transient_storage_full().saturating_sub(T::WeightInfo::get_transient_storage_empty()) + (read, $name:ident $(, $arg:expr )*) => { + T::WeightInfo::$name($( $arg ),*) + .saturating_add(T::WeightInfo::get_storage_full() + .saturating_sub(T::WeightInfo::get_storage_empty())) }; } @@ -329,17 +332,21 @@ impl Token for RuntimeCosts { DepositEvent { num_topic, len } => T::WeightInfo::seal_deposit_event(num_topic, len), DebugMessage(len) => T::WeightInfo::seal_debug_message(len), SetStorage { new_bytes, old_bytes } => - T::WeightInfo::seal_set_storage(new_bytes, old_bytes), - ClearStorage(len) => T::WeightInfo::seal_clear_storage(len), - ContainsStorage(len) => T::WeightInfo::seal_contains_storage(len), - GetStorage(len) => T::WeightInfo::seal_get_storage(len), - TakeStorage(len) => T::WeightInfo::seal_take_storage(len), + cost_storage!(write, seal_set_storage, new_bytes, old_bytes), + ClearStorage(len) => cost_storage!(write, seal_clear_storage, len), + ContainsStorage(len) => cost_storage!(read, seal_contains_storage, len), + GetStorage(len) => cost_storage!(read, seal_get_storage, len), + TakeStorage(len) => cost_storage!(write, seal_take_storage, len), SetTransientStorage { new_bytes, old_bytes } => - cost_write!(seal_set_transient_storage, new_bytes, old_bytes), - ClearTransientStorage(len) => cost_write!(seal_clear_transient_storage, len), - ContainsTransientStorage(len) => cost_read!(seal_contains_transient_storage, len), - GetTransientStorage(len) => cost_read!(seal_get_transient_storage, 
len), - TakeTransientStorage(len) => cost_write!(seal_take_transient_storage, len), + cost_storage!(write_transient, seal_set_transient_storage, new_bytes, old_bytes), + ClearTransientStorage(len) => + cost_storage!(write_transient, seal_clear_transient_storage, len), + ContainsTransientStorage(len) => + cost_storage!(read_transient, seal_contains_transient_storage, len), + GetTransientStorage(len) => + cost_storage!(read_transient, seal_get_transient_storage, len), + TakeTransientStorage(len) => + cost_storage!(write_transient, seal_take_transient_storage, len), Transfer => T::WeightInfo::seal_transfer(), CallBase => T::WeightInfo::seal_call(0, 0), DelegateCallBase => T::WeightInfo::seal_delegate_call(), diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index dc10a8aee773..25b36fc404fe 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for `pallet_contracts` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-07-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-07-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-yaoqqom-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` @@ -93,6 +93,10 @@ pub trait WeightInfo { fn seal_random() -> Weight; fn seal_deposit_event(t: u32, n: u32, ) -> Weight; fn seal_debug_message(i: u32, ) -> Weight; + fn get_storage_empty() -> Weight; + fn get_storage_full() -> Weight; + fn set_storage_empty() -> Weight; + fn set_storage_full() -> Weight; fn seal_set_storage(n: u32, o: u32, ) -> Weight; fn seal_clear_storage(n: u32, ) -> Weight; fn seal_get_storage(n: u32, ) -> Weight; @@ -137,8 +141,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_003_000, 1627) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_986_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -148,10 +152,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_463_000, 442) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(1_149_944, 0).saturating_mul(k.into())) + // Minimum execution time: 11_103_000 picoseconds. + Weight::from_parts(11_326_000, 442) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes(2_u64)) @@ -165,10 +169,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_565_000 picoseconds. - Weight::from_parts(5_041_009, 6149) + // Minimum execution time: 7_783_000 picoseconds. 
+ Weight::from_parts(4_462_075, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_640, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -181,8 +185,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 15_894_000 picoseconds. - Weight::from_parts(16_618_000, 6450) + // Minimum execution time: 15_971_000 picoseconds. + Weight::from_parts(16_730_000, 6450) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -195,10 +199,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_077_000 picoseconds. - Weight::from_parts(3_144_000, 3635) - // Standard Error: 650 - .saturating_add(Weight::from_parts(1_095_835, 0).saturating_mul(k.into())) + // Minimum execution time: 3_149_000 picoseconds. + Weight::from_parts(3_264_000, 3635) + // Standard Error: 559 + .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -217,10 +221,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 14_960_000 picoseconds. - Weight::from_parts(15_778_951, 6263) - // Standard Error: 1 - .saturating_add(Weight::from_parts(443, 0).saturating_mul(c.into())) + // Minimum execution time: 15_072_000 picoseconds. + Weight::from_parts(15_721_891, 6263) + // Standard Error: 2 + .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -231,8 +235,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_849_000 picoseconds. - Weight::from_parts(12_273_000, 6380) + // Minimum execution time: 12_047_000 picoseconds. + Weight::from_parts(12_500_000, 6380) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -246,8 +250,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_862_000 picoseconds. - Weight::from_parts(48_879_000, 6292) + // Minimum execution time: 47_488_000 picoseconds. + Weight::from_parts(48_482_000, 6292) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -259,8 +263,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 50_754_000 picoseconds. - Weight::from_parts(52_720_000, 6534) + // Minimum execution time: 52_801_000 picoseconds. 
+ Weight::from_parts(54_230_000, 6534) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -270,8 +274,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_459_000 picoseconds. - Weight::from_parts(11_921_000, 6349) + // Minimum execution time: 11_618_000 picoseconds. + Weight::from_parts(12_068_000, 6349) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -281,8 +285,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_135_000 picoseconds. - Weight::from_parts(2_247_000, 1627) + // Minimum execution time: 2_131_000 picoseconds. + Weight::from_parts(2_255_000, 1627) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -294,8 +298,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_645_000 picoseconds. - Weight::from_parts(11_107_000, 3631) + // Minimum execution time: 10_773_000 picoseconds. + Weight::from_parts(11_118_000, 3631) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -305,8 +309,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_353_000 picoseconds. - Weight::from_parts(4_628_000, 3607) + // Minimum execution time: 4_371_000 picoseconds. + Weight::from_parts(4_624_000, 3607) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -317,8 +321,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_432_000 picoseconds. - Weight::from_parts(5_624_000, 3632) + // Minimum execution time: 5_612_000 picoseconds. + Weight::from_parts(5_838_000, 3632) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -329,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_371_000 picoseconds. - Weight::from_parts(5_794_000, 3607) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_693_000, 3607) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -351,10 +355,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_157_000 picoseconds. - Weight::from_parts(269_252_698, 4266) + // Minimum execution time: 247_545_000 picoseconds. 
+ Weight::from_parts(268_016_699, 4266) // Standard Error: 4 - .saturating_add(Weight::from_parts(729, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -382,14 +386,14 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_575_784_000 picoseconds. - Weight::from_parts(207_379_459, 6262) - // Standard Error: 124 - .saturating_add(Weight::from_parts(52_392, 0).saturating_mul(c.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(i.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(2_263, 0).saturating_mul(s.into())) + // Minimum execution time: 4_396_772_000 picoseconds. + Weight::from_parts(235_107_907, 6262) + // Standard Error: 185 + .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -415,12 +419,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `560` // Estimated: `4017` - // Minimum execution time: 2_306_770_000 picoseconds. - Weight::from_parts(2_462_908_000, 4017) - // Standard Error: 33 - .saturating_add(Weight::from_parts(898, 0).saturating_mul(i.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(859, 0).saturating_mul(s.into())) + // Minimum execution time: 2_240_868_000 picoseconds. + Weight::from_parts(2_273_668_000, 4017) + // Standard Error: 32 + .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) + // Standard Error: 32 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -440,8 +444,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 165_499_000 picoseconds. - Weight::from_parts(169_903_000, 4291) + // Minimum execution time: 165_067_000 picoseconds. + Weight::from_parts(168_582_000, 4291) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -458,10 +462,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 227_590_000 picoseconds. - Weight::from_parts(260_045_588, 3607) - // Standard Error: 52 - .saturating_add(Weight::from_parts(51_305, 0).saturating_mul(c.into())) + // Minimum execution time: 229_454_000 picoseconds. + Weight::from_parts(251_495_551, 3607) + // Standard Error: 71 + .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -478,10 +482,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 239_634_000 picoseconds. 
- Weight::from_parts(262_040_831, 3607) - // Standard Error: 103 - .saturating_add(Weight::from_parts(51_590, 0).saturating_mul(c.into())) + // Minimum execution time: 240_390_000 picoseconds. + Weight::from_parts(273_854_266, 3607) + // Standard Error: 243 + .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -497,8 +501,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_152_000 picoseconds. - Weight::from_parts(39_970_000, 3780) + // Minimum execution time: 39_374_000 picoseconds. + Weight::from_parts(40_247_000, 3780) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -512,8 +516,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_143_000 picoseconds. - Weight::from_parts(26_103_000, 6492) + // Minimum execution time: 24_473_000 picoseconds. + Weight::from_parts(25_890_000, 6492) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -522,17 +526,17 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_406_000 picoseconds. - Weight::from_parts(9_056_753, 0) + // Minimum execution time: 8_528_000 picoseconds. + Weight::from_parts(9_301_010, 0) // Standard Error: 98 - .saturating_add(Weight::from_parts(53_110, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 659_000 picoseconds. - Weight::from_parts(705_000, 0) + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(678_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -540,8 +544,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_165_000 picoseconds. - Weight::from_parts(6_340_000, 3819) + // Minimum execution time: 6_107_000 picoseconds. + Weight::from_parts(6_235_000, 3819) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -550,79 +554,79 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_398_000 picoseconds. - Weight::from_parts(7_661_000, 3912) + // Minimum execution time: 7_316_000 picoseconds. + Weight::from_parts(7_653_000, 3912) .saturating_add(T::DbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 723_000 picoseconds. - Weight::from_parts(793_000, 0) + // Minimum execution time: 721_000 picoseconds. + Weight::from_parts(764_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 398_000 picoseconds. - Weight::from_parts(428_000, 0) + // Minimum execution time: 369_000 picoseconds. 
+ Weight::from_parts(417_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 329_000 picoseconds. - Weight::from_parts(364_000, 0) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(349_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 592_000 picoseconds. - Weight::from_parts(624_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(628_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 665_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_486_000 picoseconds. - Weight::from_parts(4_668_000, 0) + // Minimum execution time: 4_361_000 picoseconds. + Weight::from_parts(4_577_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 548_000 picoseconds. - Weight::from_parts(590_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(603_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 536_000 picoseconds. - Weight::from_parts(578_000, 0) + // Minimum execution time: 561_000 picoseconds. + Weight::from_parts(610_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 552_000 picoseconds. - Weight::from_parts(599_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(583_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 556_000 picoseconds. - Weight::from_parts(600_000, 0) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(602_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -630,8 +634,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_084_000 picoseconds. - Weight::from_parts(4_321_000, 1552) + // Minimum execution time: 4_065_000 picoseconds. + Weight::from_parts(4_291_000, 1552) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -639,20 +643,20 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 468_000 picoseconds. - Weight::from_parts(492_000, 0) + // Minimum execution time: 487_000 picoseconds. + Weight::from_parts(517_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(310, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 377_000 picoseconds. 
- Weight::from_parts(396_000, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(431, 0).saturating_mul(n.into())) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(372_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -665,10 +669,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_028_000 picoseconds. - Weight::from_parts(15_330_917, 3784) - // Standard Error: 8_260 - .saturating_add(Weight::from_parts(3_594_893, 0).saturating_mul(n.into())) + // Minimum execution time: 13_251_000 picoseconds. + Weight::from_parts(15_257_892, 3784) + // Standard Error: 7_089 + .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(3_u64)) @@ -681,8 +685,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_367_000 picoseconds. - Weight::from_parts(3_555_000, 1561) + // Minimum execution time: 3_434_000 picoseconds. + Weight::from_parts(3_605_000, 1561) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// Storage: `System::EventTopics` (r:4 w:4) @@ -693,12 +697,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `990 + t * (2475 ±0)` - // Minimum execution time: 3_779_000 picoseconds. - Weight::from_parts(4_003_836, 990) - // Standard Error: 5_409 - .saturating_add(Weight::from_parts(2_082_176, 0).saturating_mul(t.into())) + // Minimum execution time: 3_668_000 picoseconds. + Weight::from_parts(3_999_591, 990) + // Standard Error: 5_767 + .saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(14, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(t.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(t.into()))) .saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into())) @@ -708,10 +712,52 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 409_000 picoseconds. - Weight::from_parts(447_000, 0) + // Minimum execution time: 443_000 picoseconds. + Weight::from_parts(472_000, 0) // Standard Error: 10 - .saturating_add(Weight::from_parts(1_219, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into())) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn get_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `16618` + // Estimated: `16618` + // Minimum execution time: 13_752_000 picoseconds. 
+ Weight::from_parts(14_356_000, 16618) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn get_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `26628` + // Estimated: `26628` + // Minimum execution time: 43_444_000 picoseconds. + Weight::from_parts(45_087_000, 26628) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_storage_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `16618` + // Estimated: `16618` + // Minimum execution time: 15_616_000 picoseconds. + Weight::from_parts(16_010_000, 16618) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_storage_full() -> Weight { + // Proof Size summary in bytes: + // Measured: `26628` + // Estimated: `26628` + // Minimum execution time: 47_020_000 picoseconds. + Weight::from_parts(50_152_000, 26628) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -721,12 +767,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `250 + o * (1 ±0)` // Estimated: `249 + o * (1 ±0)` - // Minimum execution time: 9_176_000 picoseconds. - Weight::from_parts(9_121_191, 249) + // Minimum execution time: 8_824_000 picoseconds. + Weight::from_parts(8_915_233, 249) // Standard Error: 1 - .saturating_add(Weight::from_parts(292, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(31, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -738,10 +784,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_294_000 picoseconds. - Weight::from_parts(7_963_151, 248) + // Minimum execution time: 7_133_000 picoseconds. + Weight::from_parts(7_912_778, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -753,10 +799,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_978_000 picoseconds. - Weight::from_parts(7_741_355, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(654, 0).saturating_mul(n.into())) + // Minimum execution time: 6_746_000 picoseconds. 
+ Weight::from_parts(7_647_236, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -767,10 +813,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_286_000 picoseconds. - Weight::from_parts(7_026_923, 248) + // Minimum execution time: 6_247_000 picoseconds. + Weight::from_parts(6_952_661, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -781,10 +827,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_597_000 picoseconds. - Weight::from_parts(8_706_785, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) + // Minimum execution time: 7_428_000 picoseconds. + Weight::from_parts(8_384_015, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -793,36 +839,36 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_497_000 picoseconds. - Weight::from_parts(1_564_000, 0) + // Minimum execution time: 1_478_000 picoseconds. + Weight::from_parts(1_533_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_485_000 picoseconds. + Weight::from_parts(2_728_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_836_000 picoseconds. - Weight::from_parts(3_878_000, 0) + // Minimum execution time: 3_195_000 picoseconds. + Weight::from_parts(3_811_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_537_000 picoseconds. - Weight::from_parts(4_665_000, 0) + // Minimum execution time: 3_902_000 picoseconds. + Weight::from_parts(4_118_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_592_000 picoseconds. - Weight::from_parts(1_742_000, 0) + // Minimum execution time: 1_571_000 picoseconds. + Weight::from_parts(1_662_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -830,57 +876,57 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_101_000 picoseconds. - Weight::from_parts(2_481_218, 0) + // Minimum execution time: 5_250_000 picoseconds. 
+ Weight::from_parts(2_465_568, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(300, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_059_000 picoseconds. - Weight::from_parts(2_426_609, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(307, 0).saturating_mul(n.into())) + // Minimum execution time: 2_012_000 picoseconds. + Weight::from_parts(2_288_004, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_114_837, 0) + // Minimum execution time: 1_906_000 picoseconds. + Weight::from_parts(2_121_040, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(302, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_759_000 picoseconds. - Weight::from_parts(1_959_995, 0) + // Minimum execution time: 1_736_000 picoseconds. + Weight::from_parts(1_954_728, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_759_000 picoseconds. - Weight::from_parts(9_952_099, 0) + // Minimum execution time: 7_872_000 picoseconds. + Weight::from_parts(8_125_644, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_700_000 picoseconds. - Weight::from_parts(8_903_000, 0) + // Minimum execution time: 8_489_000 picoseconds. + Weight::from_parts(8_791_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -896,10 +942,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 123_399_000 picoseconds. - Weight::from_parts(120_909_821, 4085) - // Standard Error: 166_830 - .saturating_add(Weight::from_parts(43_853_642, 0).saturating_mul(t.into())) + // Minimum execution time: 122_759_000 picoseconds. 
+ Weight::from_parts(120_016_020, 4085) + // Standard Error: 173_118 + .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) @@ -916,8 +962,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 112_350_000 picoseconds. - Weight::from_parts(116_003_000, 3895) + // Minimum execution time: 111_566_000 picoseconds. + Weight::from_parts(115_083_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -936,12 +982,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_972_276_000 picoseconds. - Weight::from_parts(1_977_872_000, 4132) + // Minimum execution time: 1_871_402_000 picoseconds. + Weight::from_parts(1_890_038_000, 4132) // Standard Error: 24 - .saturating_add(Weight::from_parts(623, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) // Standard Error: 24 - .saturating_add(Weight::from_parts(917, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -950,64 +996,64 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 899_000 picoseconds. - Weight::from_parts(10_963_972, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(9_599_151, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_396_000 picoseconds. - Weight::from_parts(9_404_986, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_627, 0).saturating_mul(n.into())) + // Minimum execution time: 1_416_000 picoseconds. + Weight::from_parts(10_964_255, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 834_000 picoseconds. - Weight::from_parts(9_749_716, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(6_579_283, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_500, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 756_000 picoseconds. - Weight::from_parts(8_995_036, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_495, 0).saturating_mul(n.into())) + // Minimum execution time: 773_000 picoseconds. 
+ Weight::from_parts(10_990_209, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 45_800_000 picoseconds. - Weight::from_parts(44_676_829, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(5_315, 0).saturating_mul(n.into())) + // Minimum execution time: 43_195_000 picoseconds. + Weight::from_parts(41_864_855, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_415_000 picoseconds. - Weight::from_parts(48_743_000, 0) + // Minimum execution time: 47_747_000 picoseconds. + Weight::from_parts(49_219_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_437_000 picoseconds. - Weight::from_parts(13_588_000, 0) + // Minimum execution time: 12_854_000 picoseconds. + Weight::from_parts(12_962_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1017,8 +1063,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_775_000 picoseconds. - Weight::from_parts(18_332_000, 3895) + // Minimum execution time: 17_868_000 picoseconds. + Weight::from_parts(18_486_000, 3895) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1028,8 +1074,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_326_000 picoseconds. - Weight::from_parts(8_656_000, 3820) + // Minimum execution time: 8_393_000 picoseconds. + Weight::from_parts(8_640_000, 3820) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1039,8 +1085,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_276_000 picoseconds. - Weight::from_parts(7_630_000, 3558) + // Minimum execution time: 7_489_000 picoseconds. + Weight::from_parts(7_815_000, 3558) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -1048,15 +1094,15 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 330_000 picoseconds. - Weight::from_parts(373_000, 0) + // Minimum execution time: 299_000 picoseconds. + Weight::from_parts(339_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 381_000 picoseconds. - Weight::from_parts(418_000, 0) + // Minimum execution time: 324_000 picoseconds. + Weight::from_parts(380_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1064,8 +1110,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_711_000 picoseconds. 
- Weight::from_parts(2_941_000, 1704) + // Minimum execution time: 2_768_000 picoseconds. + Weight::from_parts(3_025_000, 1704) .saturating_add(T::DbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -1073,10 +1119,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 720_000 picoseconds. - Weight::from_parts(389_111, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(7_278, 0).saturating_mul(r.into())) + // Minimum execution time: 766_000 picoseconds. + Weight::from_parts(722_169, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) } } @@ -1088,8 +1134,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 1_921_000 picoseconds. - Weight::from_parts(2_003_000, 1627) + // Minimum execution time: 1_915_000 picoseconds. + Weight::from_parts(1_986_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Skipped::Metadata` (r:0 w:0) @@ -1099,10 +1145,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `452 + k * (69 ±0)` // Estimated: `442 + k * (70 ±0)` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_463_000, 442) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(1_149_944, 0).saturating_mul(k.into())) + // Minimum execution time: 11_103_000 picoseconds. + Weight::from_parts(11_326_000, 442) + // Standard Error: 2_291 + .saturating_add(Weight::from_parts(1_196_329, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes(2_u64)) @@ -1116,10 +1162,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `211 + c * (1 ±0)` // Estimated: `6149 + c * (1 ±0)` - // Minimum execution time: 7_565_000 picoseconds. - Weight::from_parts(5_041_009, 6149) + // Minimum execution time: 7_783_000 picoseconds. + Weight::from_parts(4_462_075, 6149) // Standard Error: 5 - .saturating_add(Weight::from_parts(1_640, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(1_634, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1132,8 +1178,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `510` // Estimated: `6450` - // Minimum execution time: 15_894_000 picoseconds. - Weight::from_parts(16_618_000, 6450) + // Minimum execution time: 15_971_000 picoseconds. + Weight::from_parts(16_730_000, 6450) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1146,10 +1192,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `171 + k * (1 ±0)` // Estimated: `3635 + k * (1 ±0)` - // Minimum execution time: 3_077_000 picoseconds. - Weight::from_parts(3_144_000, 3635) - // Standard Error: 650 - .saturating_add(Weight::from_parts(1_095_835, 0).saturating_mul(k.into())) + // Minimum execution time: 3_149_000 picoseconds. 
+ Weight::from_parts(3_264_000, 3635) + // Standard Error: 559 + .saturating_add(Weight::from_parts(1_111_209, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1168,10 +1214,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `325 + c * (1 ±0)` // Estimated: `6263 + c * (1 ±0)` - // Minimum execution time: 14_960_000 picoseconds. - Weight::from_parts(15_778_951, 6263) - // Standard Error: 1 - .saturating_add(Weight::from_parts(443, 0).saturating_mul(c.into())) + // Minimum execution time: 15_072_000 picoseconds. + Weight::from_parts(15_721_891, 6263) + // Standard Error: 2 + .saturating_add(Weight::from_parts(428, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1182,8 +1228,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `440` // Estimated: `6380` - // Minimum execution time: 11_849_000 picoseconds. - Weight::from_parts(12_273_000, 6380) + // Minimum execution time: 12_047_000 picoseconds. + Weight::from_parts(12_500_000, 6380) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1197,8 +1243,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `352` // Estimated: `6292` - // Minimum execution time: 47_862_000 picoseconds. - Weight::from_parts(48_879_000, 6292) + // Minimum execution time: 47_488_000 picoseconds. + Weight::from_parts(48_482_000, 6292) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1210,8 +1256,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `594` // Estimated: `6534` - // Minimum execution time: 50_754_000 picoseconds. - Weight::from_parts(52_720_000, 6534) + // Minimum execution time: 52_801_000 picoseconds. + Weight::from_parts(54_230_000, 6534) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1221,8 +1267,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `409` // Estimated: `6349` - // Minimum execution time: 11_459_000 picoseconds. - Weight::from_parts(11_921_000, 6349) + // Minimum execution time: 11_618_000 picoseconds. + Weight::from_parts(12_068_000, 6349) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1232,8 +1278,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `1627` - // Minimum execution time: 2_135_000 picoseconds. - Weight::from_parts(2_247_000, 1627) + // Minimum execution time: 2_131_000 picoseconds. + Weight::from_parts(2_255_000, 1627) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1245,8 +1291,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `166` // Estimated: `3631` - // Minimum execution time: 10_645_000 picoseconds. - Weight::from_parts(11_107_000, 3631) + // Minimum execution time: 10_773_000 picoseconds. 
+ Weight::from_parts(11_118_000, 3631) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1256,8 +1302,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 4_353_000 picoseconds. - Weight::from_parts(4_628_000, 3607) + // Minimum execution time: 4_371_000 picoseconds. + Weight::from_parts(4_624_000, 3607) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1268,8 +1314,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `167` // Estimated: `3632` - // Minimum execution time: 5_432_000 picoseconds. - Weight::from_parts(5_624_000, 3632) + // Minimum execution time: 5_612_000 picoseconds. + Weight::from_parts(5_838_000, 3632) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: UNKNOWN KEY `0x4342193e496fab7ec59d615ed0dc55304e7b9012096b41c4eb3aaf947f6ea429` (r:1 w:0) @@ -1280,8 +1326,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 5_371_000 picoseconds. - Weight::from_parts(5_794_000, 3607) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_693_000, 3607) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1302,10 +1348,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `800 + c * (1 ±0)` // Estimated: `4266 + c * (1 ±0)` - // Minimum execution time: 247_157_000 picoseconds. - Weight::from_parts(269_252_698, 4266) + // Minimum execution time: 247_545_000 picoseconds. + Weight::from_parts(268_016_699, 4266) // Standard Error: 4 - .saturating_add(Weight::from_parts(729, 0).saturating_mul(c.into())) + .saturating_add(Weight::from_parts(700, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(c.into())) @@ -1333,14 +1379,14 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `323` // Estimated: `6262` - // Minimum execution time: 4_575_784_000 picoseconds. - Weight::from_parts(207_379_459, 6262) - // Standard Error: 124 - .saturating_add(Weight::from_parts(52_392, 0).saturating_mul(c.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(2_257, 0).saturating_mul(i.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(2_263, 0).saturating_mul(s.into())) + // Minimum execution time: 4_396_772_000 picoseconds. + Weight::from_parts(235_107_907, 6262) + // Standard Error: 185 + .saturating_add(Weight::from_parts(53_843, 0).saturating_mul(c.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_143, 0).saturating_mul(i.into())) + // Standard Error: 22 + .saturating_add(Weight::from_parts(2_210, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1366,12 +1412,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `560` // Estimated: `4017` - // Minimum execution time: 2_306_770_000 picoseconds. 
- Weight::from_parts(2_462_908_000, 4017) - // Standard Error: 33 - .saturating_add(Weight::from_parts(898, 0).saturating_mul(i.into())) - // Standard Error: 33 - .saturating_add(Weight::from_parts(859, 0).saturating_mul(s.into())) + // Minimum execution time: 2_240_868_000 picoseconds. + Weight::from_parts(2_273_668_000, 4017) + // Standard Error: 32 + .saturating_add(Weight::from_parts(934, 0).saturating_mul(i.into())) + // Standard Error: 32 + .saturating_add(Weight::from_parts(920, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -1391,8 +1437,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `826` // Estimated: `4291` - // Minimum execution time: 165_499_000 picoseconds. - Weight::from_parts(169_903_000, 4291) + // Minimum execution time: 165_067_000 picoseconds. + Weight::from_parts(168_582_000, 4291) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1409,10 +1455,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 227_590_000 picoseconds. - Weight::from_parts(260_045_588, 3607) - // Standard Error: 52 - .saturating_add(Weight::from_parts(51_305, 0).saturating_mul(c.into())) + // Minimum execution time: 229_454_000 picoseconds. + Weight::from_parts(251_495_551, 3607) + // Standard Error: 71 + .saturating_add(Weight::from_parts(51_428, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1429,10 +1475,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `142` // Estimated: `3607` - // Minimum execution time: 239_634_000 picoseconds. - Weight::from_parts(262_040_831, 3607) - // Standard Error: 103 - .saturating_add(Weight::from_parts(51_590, 0).saturating_mul(c.into())) + // Minimum execution time: 240_390_000 picoseconds. + Weight::from_parts(273_854_266, 3607) + // Standard Error: 243 + .saturating_add(Weight::from_parts(51_836, 0).saturating_mul(c.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1448,8 +1494,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `315` // Estimated: `3780` - // Minimum execution time: 39_152_000 picoseconds. - Weight::from_parts(39_970_000, 3780) + // Minimum execution time: 39_374_000 picoseconds. + Weight::from_parts(40_247_000, 3780) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1463,8 +1509,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `552` // Estimated: `6492` - // Minimum execution time: 25_143_000 picoseconds. - Weight::from_parts(26_103_000, 6492) + // Minimum execution time: 24_473_000 picoseconds. + Weight::from_parts(25_890_000, 6492) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1473,17 +1519,17 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_406_000 picoseconds. - Weight::from_parts(9_056_753, 0) + // Minimum execution time: 8_528_000 picoseconds. 
+ Weight::from_parts(9_301_010, 0) // Standard Error: 98 - .saturating_add(Weight::from_parts(53_110, 0).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(53_173, 0).saturating_mul(r.into())) } fn seal_caller() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 659_000 picoseconds. - Weight::from_parts(705_000, 0) + // Minimum execution time: 643_000 picoseconds. + Weight::from_parts(678_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1491,8 +1537,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `354` // Estimated: `3819` - // Minimum execution time: 6_165_000 picoseconds. - Weight::from_parts(6_340_000, 3819) + // Minimum execution time: 6_107_000 picoseconds. + Weight::from_parts(6_235_000, 3819) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:0) @@ -1501,79 +1547,79 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `447` // Estimated: `3912` - // Minimum execution time: 7_398_000 picoseconds. - Weight::from_parts(7_661_000, 3912) + // Minimum execution time: 7_316_000 picoseconds. + Weight::from_parts(7_653_000, 3912) .saturating_add(RocksDbWeight::get().reads(1_u64)) } fn seal_own_code_hash() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 723_000 picoseconds. - Weight::from_parts(793_000, 0) + // Minimum execution time: 721_000 picoseconds. + Weight::from_parts(764_000, 0) } fn seal_caller_is_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 398_000 picoseconds. - Weight::from_parts(428_000, 0) + // Minimum execution time: 369_000 picoseconds. + Weight::from_parts(417_000, 0) } fn seal_caller_is_root() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 329_000 picoseconds. - Weight::from_parts(364_000, 0) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(349_000, 0) } fn seal_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 592_000 picoseconds. - Weight::from_parts(624_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(628_000, 0) } fn seal_gas_left() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 665_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(730_000, 0) } fn seal_balance() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 4_486_000 picoseconds. - Weight::from_parts(4_668_000, 0) + // Minimum execution time: 4_361_000 picoseconds. + Weight::from_parts(4_577_000, 0) } fn seal_value_transferred() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 548_000 picoseconds. - Weight::from_parts(590_000, 0) + // Minimum execution time: 560_000 picoseconds. + Weight::from_parts(603_000, 0) } fn seal_minimum_balance() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 536_000 picoseconds. - Weight::from_parts(578_000, 0) + // Minimum execution time: 561_000 picoseconds. 
+ Weight::from_parts(610_000, 0) } fn seal_block_number() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 552_000 picoseconds. - Weight::from_parts(599_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(583_000, 0) } fn seal_now() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 556_000 picoseconds. - Weight::from_parts(600_000, 0) + // Minimum execution time: 550_000 picoseconds. + Weight::from_parts(602_000, 0) } /// Storage: `TransactionPayment::NextFeeMultiplier` (r:1 w:0) /// Proof: `TransactionPayment::NextFeeMultiplier` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) @@ -1581,8 +1627,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `67` // Estimated: `1552` - // Minimum execution time: 4_084_000 picoseconds. - Weight::from_parts(4_321_000, 1552) + // Minimum execution time: 4_065_000 picoseconds. + Weight::from_parts(4_291_000, 1552) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `n` is `[0, 1048572]`. @@ -1590,20 +1636,20 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 468_000 picoseconds. - Weight::from_parts(492_000, 0) + // Minimum execution time: 487_000 picoseconds. + Weight::from_parts(517_000, 0) // Standard Error: 3 - .saturating_add(Weight::from_parts(310, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(301, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048572]`. fn seal_return(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 377_000 picoseconds. - Weight::from_parts(396_000, 0) - // Standard Error: 9 - .saturating_add(Weight::from_parts(431, 0).saturating_mul(n.into())) + // Minimum execution time: 318_000 picoseconds. + Weight::from_parts(372_000, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(411, 0).saturating_mul(n.into())) } /// Storage: `Contracts::DeletionQueueCounter` (r:1 w:1) /// Proof: `Contracts::DeletionQueueCounter` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -1616,10 +1662,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `319 + n * (78 ±0)` // Estimated: `3784 + n * (2553 ±0)` - // Minimum execution time: 13_028_000 picoseconds. - Weight::from_parts(15_330_917, 3784) - // Standard Error: 8_260 - .saturating_add(Weight::from_parts(3_594_893, 0).saturating_mul(n.into())) + // Minimum execution time: 13_251_000 picoseconds. + Weight::from_parts(15_257_892, 3784) + // Standard Error: 7_089 + .saturating_add(Weight::from_parts(3_443_907, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(3_u64)) @@ -1632,8 +1678,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `76` // Estimated: `1561` - // Minimum execution time: 3_367_000 picoseconds. - Weight::from_parts(3_555_000, 1561) + // Minimum execution time: 3_434_000 picoseconds. 
+		Weight::from_parts(3_605_000, 1561)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 	}
 	/// Storage: `System::EventTopics` (r:4 w:4)
@@ -1644,12 +1690,12 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `990 + t * (2475 ±0)`
-		// Minimum execution time: 3_779_000 picoseconds.
-		Weight::from_parts(4_003_836, 990)
-			// Standard Error: 5_409
-			.saturating_add(Weight::from_parts(2_082_176, 0).saturating_mul(t.into()))
+		// Minimum execution time: 3_668_000 picoseconds.
+		Weight::from_parts(3_999_591, 990)
+			// Standard Error: 5_767
+			.saturating_add(Weight::from_parts(2_011_090, 0).saturating_mul(t.into()))
 			// Standard Error: 1
-			.saturating_add(Weight::from_parts(14, 0).saturating_mul(n.into()))
+			.saturating_add(Weight::from_parts(12, 0).saturating_mul(n.into()))
 			.saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(t.into())))
 			.saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(t.into())))
 			.saturating_add(Weight::from_parts(0, 2475).saturating_mul(t.into()))
@@ -1659,10 +1705,52 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 409_000 picoseconds.
-		Weight::from_parts(447_000, 0)
+		// Minimum execution time: 443_000 picoseconds.
+		Weight::from_parts(472_000, 0)
 			// Standard Error: 10
-			.saturating_add(Weight::from_parts(1_219, 0).saturating_mul(i.into()))
+			.saturating_add(Weight::from_parts(1_207, 0).saturating_mul(i.into()))
+	}
+	/// Storage: `Skipped::Metadata` (r:0 w:0)
+	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn get_storage_empty() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `16618`
+		// Estimated: `16618`
+		// Minimum execution time: 13_752_000 picoseconds.
+		Weight::from_parts(14_356_000, 16618)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+	}
+	/// Storage: `Skipped::Metadata` (r:0 w:0)
+	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn get_storage_full() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `26628`
+		// Estimated: `26628`
+		// Minimum execution time: 43_444_000 picoseconds.
+		Weight::from_parts(45_087_000, 26628)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+	}
+	/// Storage: `Skipped::Metadata` (r:0 w:0)
+	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn set_storage_empty() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `16618`
+		// Estimated: `16618`
+		// Minimum execution time: 15_616_000 picoseconds.
+		Weight::from_parts(16_010_000, 16618)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Skipped::Metadata` (r:0 w:0)
+	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	fn set_storage_full() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `26628`
+		// Estimated: `26628`
+		// Minimum execution time: 47_020_000 picoseconds.
+		Weight::from_parts(50_152_000, 26628)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	/// Storage: `Skipped::Metadata` (r:0 w:0)
 	/// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -1672,12 +1760,12 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		// Measured: `250 + o * (1 ±0)`
 		// Estimated: `249 + o * (1 ±0)`
-		// Minimum execution time: 9_176_000 picoseconds.
- Weight::from_parts(9_121_191, 249) + // Minimum execution time: 8_824_000 picoseconds. + Weight::from_parts(8_915_233, 249) // Standard Error: 1 - .saturating_add(Weight::from_parts(292, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(255, 0).saturating_mul(n.into())) // Standard Error: 1 - .saturating_add(Weight::from_parts(31, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(39, 0).saturating_mul(o.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(o.into())) @@ -1689,10 +1777,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_294_000 picoseconds. - Weight::from_parts(7_963_151, 248) + // Minimum execution time: 7_133_000 picoseconds. + Weight::from_parts(7_912_778, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(92, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(88, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1704,10 +1792,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_978_000 picoseconds. - Weight::from_parts(7_741_355, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(654, 0).saturating_mul(n.into())) + // Minimum execution time: 6_746_000 picoseconds. + Weight::from_parts(7_647_236, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(603, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1718,10 +1806,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 6_286_000 picoseconds. - Weight::from_parts(7_026_923, 248) + // Minimum execution time: 6_247_000 picoseconds. + Weight::from_parts(6_952_661, 248) // Standard Error: 1 - .saturating_add(Weight::from_parts(86, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(77, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) } @@ -1732,10 +1820,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `248 + n * (1 ±0)` // Estimated: `248 + n * (1 ±0)` - // Minimum execution time: 7_597_000 picoseconds. - Weight::from_parts(8_706_785, 248) - // Standard Error: 1 - .saturating_add(Weight::from_parts(653, 0).saturating_mul(n.into())) + // Minimum execution time: 7_428_000 picoseconds. + Weight::from_parts(8_384_015, 248) + // Standard Error: 2 + .saturating_add(Weight::from_parts(625, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(n.into())) @@ -1744,36 +1832,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_497_000 picoseconds. - Weight::from_parts(1_564_000, 0) + // Minimum execution time: 1_478_000 picoseconds. 
+ Weight::from_parts(1_533_000, 0) } fn set_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_807_000, 0) + // Minimum execution time: 2_485_000 picoseconds. + Weight::from_parts(2_728_000, 0) } fn get_transient_storage_empty() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_836_000 picoseconds. - Weight::from_parts(3_878_000, 0) + // Minimum execution time: 3_195_000 picoseconds. + Weight::from_parts(3_811_000, 0) } fn get_transient_storage_full() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_537_000 picoseconds. - Weight::from_parts(4_665_000, 0) + // Minimum execution time: 3_902_000 picoseconds. + Weight::from_parts(4_118_000, 0) } fn rollback_transient_storage() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_592_000 picoseconds. - Weight::from_parts(1_742_000, 0) + // Minimum execution time: 1_571_000 picoseconds. + Weight::from_parts(1_662_000, 0) } /// The range of component `n` is `[0, 16384]`. /// The range of component `o` is `[0, 16384]`. @@ -1781,57 +1869,57 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_101_000 picoseconds. - Weight::from_parts(2_481_218, 0) + // Minimum execution time: 5_250_000 picoseconds. + Weight::from_parts(2_465_568, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(242, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(201, 0).saturating_mul(n.into())) // Standard Error: 0 - .saturating_add(Weight::from_parts(300, 0).saturating_mul(o.into())) + .saturating_add(Weight::from_parts(223, 0).saturating_mul(o.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_clear_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_059_000 picoseconds. - Weight::from_parts(2_426_609, 0) - // Standard Error: 2 - .saturating_add(Weight::from_parts(307, 0).saturating_mul(n.into())) + // Minimum execution time: 2_012_000 picoseconds. + Weight::from_parts(2_288_004, 0) + // Standard Error: 3 + .saturating_add(Weight::from_parts(239, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_get_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_918_000 picoseconds. - Weight::from_parts(2_114_837, 0) + // Minimum execution time: 1_906_000 picoseconds. + Weight::from_parts(2_121_040, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(302, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(225, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. fn seal_contains_transient_storage(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_759_000 picoseconds. - Weight::from_parts(1_959_995, 0) + // Minimum execution time: 1_736_000 picoseconds. + Weight::from_parts(1_954_728, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(147, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(111, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 16384]`. 
fn seal_take_transient_storage(_n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_759_000 picoseconds. - Weight::from_parts(9_952_099, 0) + // Minimum execution time: 7_872_000 picoseconds. + Weight::from_parts(8_125_644, 0) } fn seal_transfer() -> Weight { // Proof Size summary in bytes: // Measured: `140` // Estimated: `0` - // Minimum execution time: 8_700_000 picoseconds. - Weight::from_parts(8_903_000, 0) + // Minimum execution time: 8_489_000 picoseconds. + Weight::from_parts(8_791_000, 0) } /// Storage: `Contracts::ContractInfoOf` (r:1 w:1) /// Proof: `Contracts::ContractInfoOf` (`max_values`: None, `max_size`: Some(1795), added: 4270, mode: `Measured`) @@ -1847,10 +1935,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `620 + t * (280 ±0)` // Estimated: `4085 + t * (2182 ±0)` - // Minimum execution time: 123_399_000 picoseconds. - Weight::from_parts(120_909_821, 4085) - // Standard Error: 166_830 - .saturating_add(Weight::from_parts(43_853_642, 0).saturating_mul(t.into())) + // Minimum execution time: 122_759_000 picoseconds. + Weight::from_parts(120_016_020, 4085) + // Standard Error: 173_118 + .saturating_add(Weight::from_parts(42_848_338, 0).saturating_mul(t.into())) // Standard Error: 0 .saturating_add(Weight::from_parts(6, 0).saturating_mul(i.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) @@ -1867,8 +1955,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 112_350_000 picoseconds. - Weight::from_parts(116_003_000, 3895) + // Minimum execution time: 111_566_000 picoseconds. + Weight::from_parts(115_083_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) @@ -1887,12 +1975,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `676` // Estimated: `4132` - // Minimum execution time: 1_972_276_000 picoseconds. - Weight::from_parts(1_977_872_000, 4132) + // Minimum execution time: 1_871_402_000 picoseconds. + Weight::from_parts(1_890_038_000, 4132) // Standard Error: 24 - .saturating_add(Weight::from_parts(623, 0).saturating_mul(i.into())) + .saturating_add(Weight::from_parts(581, 0).saturating_mul(i.into())) // Standard Error: 24 - .saturating_add(Weight::from_parts(917, 0).saturating_mul(s.into())) + .saturating_add(Weight::from_parts(915, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1901,64 +1989,64 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 899_000 picoseconds. - Weight::from_parts(10_963_972, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_355, 0).saturating_mul(n.into())) + // Minimum execution time: 966_000 picoseconds. + Weight::from_parts(9_599_151, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_336, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_keccak_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_396_000 picoseconds. - Weight::from_parts(9_404_986, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(3_627, 0).saturating_mul(n.into())) + // Minimum execution time: 1_416_000 picoseconds. 
+ Weight::from_parts(10_964_255, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(3_593, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_256(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 834_000 picoseconds. - Weight::from_parts(9_749_716, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(6_579_283, 0) // Standard Error: 0 - .saturating_add(Weight::from_parts(1_500, 0).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(1_466, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 1048576]`. fn seal_hash_blake2_128(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 756_000 picoseconds. - Weight::from_parts(8_995_036, 0) - // Standard Error: 0 - .saturating_add(Weight::from_parts(1_495, 0).saturating_mul(n.into())) + // Minimum execution time: 773_000 picoseconds. + Weight::from_parts(10_990_209, 0) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(n.into())) } /// The range of component `n` is `[0, 125697]`. fn seal_sr25519_verify(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 45_800_000 picoseconds. - Weight::from_parts(44_676_829, 0) - // Standard Error: 8 - .saturating_add(Weight::from_parts(5_315, 0).saturating_mul(n.into())) + // Minimum execution time: 43_195_000 picoseconds. + Weight::from_parts(41_864_855, 0) + // Standard Error: 9 + .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into())) } fn seal_ecdsa_recover() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 47_415_000 picoseconds. - Weight::from_parts(48_743_000, 0) + // Minimum execution time: 47_747_000 picoseconds. + Weight::from_parts(49_219_000, 0) } fn seal_ecdsa_to_eth_address() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 13_437_000 picoseconds. - Weight::from_parts(13_588_000, 0) + // Minimum execution time: 12_854_000 picoseconds. + Weight::from_parts(12_962_000, 0) } /// Storage: `Contracts::CodeInfoOf` (r:1 w:1) /// Proof: `Contracts::CodeInfoOf` (`max_values`: None, `max_size`: Some(93), added: 2568, mode: `Measured`) @@ -1968,8 +2056,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `430` // Estimated: `3895` - // Minimum execution time: 17_775_000 picoseconds. - Weight::from_parts(18_332_000, 3895) + // Minimum execution time: 17_868_000 picoseconds. + Weight::from_parts(18_486_000, 3895) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1979,8 +2067,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3820` - // Minimum execution time: 8_326_000 picoseconds. - Weight::from_parts(8_656_000, 3820) + // Minimum execution time: 8_393_000 picoseconds. + Weight::from_parts(8_640_000, 3820) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1990,8 +2078,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `355` // Estimated: `3558` - // Minimum execution time: 7_276_000 picoseconds. - Weight::from_parts(7_630_000, 3558) + // Minimum execution time: 7_489_000 picoseconds. 
+ Weight::from_parts(7_815_000, 3558) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1999,15 +2087,15 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 330_000 picoseconds. - Weight::from_parts(373_000, 0) + // Minimum execution time: 299_000 picoseconds. + Weight::from_parts(339_000, 0) } fn seal_account_reentrance_count() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 381_000 picoseconds. - Weight::from_parts(418_000, 0) + // Minimum execution time: 324_000 picoseconds. + Weight::from_parts(380_000, 0) } /// Storage: `Contracts::Nonce` (r:1 w:0) /// Proof: `Contracts::Nonce` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `Measured`) @@ -2015,8 +2103,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `219` // Estimated: `1704` - // Minimum execution time: 2_711_000 picoseconds. - Weight::from_parts(2_941_000, 1704) + // Minimum execution time: 2_768_000 picoseconds. + Weight::from_parts(3_025_000, 1704) .saturating_add(RocksDbWeight::get().reads(1_u64)) } /// The range of component `r` is `[0, 5000]`. @@ -2024,9 +2112,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 720_000 picoseconds. - Weight::from_parts(389_111, 0) - // Standard Error: 16 - .saturating_add(Weight::from_parts(7_278, 0).saturating_mul(r.into())) + // Minimum execution time: 766_000 picoseconds. + Weight::from_parts(722_169, 0) + // Standard Error: 10 + .saturating_add(Weight::from_parts(7_191, 0).saturating_mul(r.into())) } }
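Note on reading the regenerated formulas above: each benchmark function encodes a fitted linear model, a constant base weight (ref_time in picoseconds, proof_size in bytes) plus a per-unit slope scaled by the benchmark component. Below is a minimal, self-contained sketch of how such a formula is evaluated; the `Weight` struct here is a simplified stand-in for the real type from `sp_weights`, and the constants are the `seal_sr25519_verify` values introduced in this diff.

// Simplified stand-in for `sp_weights::Weight`, for illustration only.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,   // picoseconds of execution time
    proof_size: u64, // bytes of proof (PoV) size
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
    fn saturating_mul(self, scalar: u64) -> Self {
        Self {
            ref_time: self.ref_time.saturating_mul(scalar),
            proof_size: self.proof_size.saturating_mul(scalar),
        }
    }
}

// Fitted formula for `seal_sr25519_verify`: base cost plus a per-byte slope,
// using the constants from this diff.
fn seal_sr25519_verify(n: u32) -> Weight {
    Weight::from_parts(41_864_855, 0)
        .saturating_add(Weight::from_parts(5_154, 0).saturating_mul(n.into()))
}

fn main() {
    // Verifying a 1 KiB message: ~41.9 µs base plus ~5.2 ns per byte.
    let w = seal_sr25519_verify(1024);
    assert_eq!(w, Weight::from_parts(41_864_855 + 5_154 * 1024, 0));
    println!("{w:?}");
}

In the generated file the same shape is extended with `T::DbWeight::get().reads(..)` / `writes(..)` (or `RocksDbWeight` for the `()` implementation), which add the runtime's configured per-read and per-write database costs on top of the fitted execution time.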