diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml
index 6b72075c513b..38c5332f3097 100644
--- a/.gitlab/pipeline/zombienet/polkadot.yml
+++ b/.gitlab/pipeline/zombienet/polkadot.yml
@@ -176,6 +176,14 @@ zombienet-polkadot-elastic-scaling-0002-elastic-scaling-doesnt-break-parachains:
--local-dir="${LOCAL_DIR}/elastic_scaling"
--test="0002-elastic-scaling-doesnt-break-parachains.zndsl"
+zombienet-polkadot-functional-0012-spam-statement-distribution-requests:
+ extends:
+ - .zombienet-polkadot-common
+ script:
+ - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh
+ --local-dir="${LOCAL_DIR}/functional"
+ --test="0012-spam-statement-distribution-requests.zndsl"
+
zombienet-polkadot-smoke-0001-parachains-smoke-test:
extends:
- .zombienet-polkadot-common
diff --git a/Cargo.lock b/Cargo.lock
index 5119c2c72c4d..96303018b417 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8441,6 +8441,19 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+[[package]]
+name = "minimal-template"
+version = "0.0.0"
+dependencies = [
+ "docify",
+ "minimal-template-node",
+ "minimal-template-runtime",
+ "pallet-minimal-template",
+ "polkadot-sdk-docs",
+ "polkadot-sdk-frame",
+ "simple-mermaid",
+]
+
[[package]]
name = "minimal-template-node"
version = "0.0.0"
@@ -14226,6 +14239,7 @@ dependencies = [
"polkadot-node-core-pvf-common",
"polkadot-node-core-pvf-execute-worker",
"polkadot-node-core-pvf-prepare-worker",
+ "polkadot-node-network-protocol",
"polkadot-node-primitives",
"polkadot-node-subsystem",
"polkadot-node-subsystem-test-helpers",
diff --git a/Cargo.toml b/Cargo.toml
index 42a6bc8abe1e..1d3f3d8e9ecd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -513,6 +513,7 @@ members = [
"substrate/utils/substrate-bip39",
"substrate/utils/wasm-builder",
+ "templates/minimal",
"templates/minimal/node",
"templates/minimal/pallets/template",
"templates/minimal/runtime",
diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml
index 2f63c2f0938d..750074fa9b3c 100644
--- a/polkadot/node/malus/Cargo.toml
+++ b/polkadot/node/malus/Cargo.toml
@@ -37,6 +37,7 @@ polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator"
polkadot-node-core-candidate-validation = { path = "../core/candidate-validation" }
polkadot-node-core-backing = { path = "../core/backing" }
polkadot-node-primitives = { path = "../primitives" }
+polkadot-node-network-protocol = { path = "../network/protocol" }
polkadot-primitives = { path = "../../primitives" }
color-eyre = { version = "0.6.1", default-features = false }
assert_matches = "1.5"
diff --git a/polkadot/node/malus/src/malus.rs b/polkadot/node/malus/src/malus.rs
index 7a9e320e2736..6257201537c8 100644
--- a/polkadot/node/malus/src/malus.rs
+++ b/polkadot/node/malus/src/malus.rs
@@ -40,6 +40,8 @@ enum NemesisVariant {
DisputeAncestor(DisputeAncestorOptions),
/// Delayed disputing of finalized candidates.
DisputeFinalizedCandidates(DisputeFinalizedCandidatesOptions),
+ /// Spam many request statements instead of sending a single one.
+ SpamStatementRequests(SpamStatementRequestsOptions),
}
#[derive(Debug, Parser)]
@@ -98,6 +100,11 @@ impl MalusCli {
finality_delay,
)?
},
+ NemesisVariant::SpamStatementRequests(opts) => {
+ let SpamStatementRequestsOptions { spam_factor, cli } = opts;
+
+ polkadot_cli::run_node(cli, SpamStatementRequests { spam_factor }, finality_delay)?
+ },
}
Ok(())
}
diff --git a/polkadot/node/malus/src/variants/mod.rs b/polkadot/node/malus/src/variants/mod.rs
index 3ca1bf4b4696..ec945ae19457 100644
--- a/polkadot/node/malus/src/variants/mod.rs
+++ b/polkadot/node/malus/src/variants/mod.rs
@@ -20,6 +20,7 @@ mod back_garbage_candidate;
mod common;
mod dispute_finalized_candidates;
mod dispute_valid_candidates;
+mod spam_statement_requests;
mod suggest_garbage_candidate;
mod support_disabled;
@@ -27,6 +28,7 @@ pub(crate) use self::{
back_garbage_candidate::{BackGarbageCandidateOptions, BackGarbageCandidates},
dispute_finalized_candidates::{DisputeFinalizedCandidates, DisputeFinalizedCandidatesOptions},
dispute_valid_candidates::{DisputeAncestorOptions, DisputeValidCandidates},
+ spam_statement_requests::{SpamStatementRequests, SpamStatementRequestsOptions},
suggest_garbage_candidate::{SuggestGarbageCandidateOptions, SuggestGarbageCandidates},
support_disabled::{SupportDisabled, SupportDisabledOptions},
};
diff --git a/polkadot/node/malus/src/variants/spam_statement_requests.rs b/polkadot/node/malus/src/variants/spam_statement_requests.rs
new file mode 100644
index 000000000000..c5970c988ac2
--- /dev/null
+++ b/polkadot/node/malus/src/variants/spam_statement_requests.rs
@@ -0,0 +1,155 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A malicious node variant that attempts spam statement requests.
+//!
+//! This malus variant behaves honestly in everything except when propagating statement distribution
+//! requests through the network bridge subsystem. Instead of sending a single request when it needs
+//! something it attempts to spam the peer with multiple requests.
+//!
+//! Attention: For usage with `zombienet` only!
+
+#![allow(missing_docs)]
+
+use polkadot_cli::{
+ service::{
+ AuxStore, Error, ExtendedOverseerGenArgs, Overseer, OverseerConnector, OverseerGen,
+ OverseerGenArgs, OverseerHandle,
+ },
+ validator_overseer_builder, Cli,
+};
+use polkadot_node_network_protocol::request_response::{outgoing::Requests, OutgoingRequest};
+use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, SpawnGlue};
+use polkadot_node_subsystem_types::{ChainApiBackend, RuntimeApiSubsystemClient};
+use sp_core::traits::SpawnNamed;
+
+// Filter wrapping related types.
+use crate::{interceptor::*, shared::MALUS};
+
+use std::sync::Arc;
+
+/// Wraps around network bridge and replaces it.
+#[derive(Clone)]
+struct RequestSpammer {
+ spam_factor: u32, // How many statement distribution requests to send.
+}
+
+impl<Sender> MessageInterceptor<Sender> for RequestSpammer
+where
+ Sender: overseer::NetworkBridgeTxSenderTrait + Clone + Send + 'static,
+{
+ type Message = NetworkBridgeTxMessage;
+
+ /// Intercept NetworkBridgeTxMessage::SendRequests with Requests::AttestedCandidateV2 inside and
+ /// duplicate that request
+ fn intercept_incoming(
+ &self,
+ _subsystem_sender: &mut Sender,
+ msg: FromOrchestra<Self::Message>,
+ ) -> Option<FromOrchestra<Self::Message>> {
+ match msg {
+ FromOrchestra::Communication {
+ msg: NetworkBridgeTxMessage::SendRequests(requests, if_disconnected),
+ } => {
+ let mut new_requests = Vec::new();
+
+ for request in requests {
+ match request {
+ Requests::AttestedCandidateV2(ref req) => {
+ // Temporarily store peer and payload for duplication
+ let peer_to_duplicate = req.peer.clone();
+ let payload_to_duplicate = req.payload.clone();
+ // Push the original request
+ new_requests.push(request);
+
+ // Duplicate for spam purposes
+ gum::info!(
+ target: MALUS,
+ "😈 Duplicating AttestedCandidateV2 request extra {:?} times to peer: {:?}.", self.spam_factor, peer_to_duplicate,
+ );
+ new_requests.extend((0..self.spam_factor - 1).map(|_| {
+ let (new_outgoing_request, _) = OutgoingRequest::new(
+ peer_to_duplicate.clone(),
+ payload_to_duplicate.clone(),
+ );
+ Requests::AttestedCandidateV2(new_outgoing_request)
+ }));
+ },
+ _ => {
+ new_requests.push(request);
+ },
+ }
+ }
+
+ // Passthrough the message with a potentially modified number of requests
+ Some(FromOrchestra::Communication {
+ msg: NetworkBridgeTxMessage::SendRequests(new_requests, if_disconnected),
+ })
+ },
+ FromOrchestra::Communication { msg } => Some(FromOrchestra::Communication { msg }),
+ FromOrchestra::Signal(signal) => Some(FromOrchestra::Signal(signal)),
+ }
+ }
+}
+
+//----------------------------------------------------------------------------------
+
+#[derive(Debug, clap::Parser)]
+#[clap(rename_all = "kebab-case")]
+#[allow(missing_docs)]
+pub struct SpamStatementRequestsOptions {
+ /// How many statement distribution requests to send.
+ #[clap(long, ignore_case = true, default_value_t = 1000, value_parser = clap::value_parser!(u32).range(0..=10000000))]
+ pub spam_factor: u32,
+
+ #[clap(flatten)]
+ pub cli: Cli,
+}
+
+/// SpamStatementRequests implementation wrapper which implements `OverseerGen` glue.
+pub(crate) struct SpamStatementRequests {
+ /// How many statement distribution requests to send.
+ pub spam_factor: u32,
+}
+
+impl OverseerGen for SpamStatementRequests {
+ fn generate<Spawner, RuntimeClient>(
+ &self,
+ connector: OverseerConnector,
+ args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
+ ext_args: Option<ExtendedOverseerGenArgs>,
+ ) -> Result<(Overseer<SpawnGlue<Spawner>, Arc<RuntimeClient>>, OverseerHandle), Error>
+ where
+ RuntimeClient: RuntimeApiSubsystemClient + ChainApiBackend + AuxStore + 'static,
+ Spawner: 'static + SpawnNamed + Clone + Unpin,
+ {
+ gum::info!(
+ target: MALUS,
+ "😈 Started Malus node that duplicates each statement distribution request spam_factor = {:?} times.",
+ &self.spam_factor,
+ );
+
+ let request_spammer = RequestSpammer { spam_factor: self.spam_factor };
+
+ validator_overseer_builder(
+ args,
+ ext_args.expect("Extended arguments required to build validator overseer are provided"),
+ )?
+ .replace_network_bridge_tx(move |cb| InterceptedSubsystem::new(cb, request_spammer))
+ .build_with_connector(connector)
+ .map_err(|e| e.into())
+ }
+}
diff --git a/polkadot/node/network/statement-distribution/src/lib.rs b/polkadot/node/network/statement-distribution/src/lib.rs
index 7e91d2849120..4ca199c3378b 100644
--- a/polkadot/node/network/statement-distribution/src/lib.rs
+++ b/polkadot/node/network/statement-distribution/src/lib.rs
@@ -207,6 +207,7 @@ impl StatementDistributionSubsystem {
v2::respond_task(
self.req_receiver.take().expect("Mandatory argument to new. qed"),
res_sender.clone(),
+ self.metrics.clone(),
)
.boxed(),
)
diff --git a/polkadot/node/network/statement-distribution/src/metrics.rs b/polkadot/node/network/statement-distribution/src/metrics.rs
index b9a51dc89d61..1bc994174263 100644
--- a/polkadot/node/network/statement-distribution/src/metrics.rs
+++ b/polkadot/node/network/statement-distribution/src/metrics.rs
@@ -24,14 +24,19 @@ const HISTOGRAM_LATENCY_BUCKETS: &[f64] = &[
#[derive(Clone)]
struct MetricsInner {
+ // V1
statements_distributed: prometheus::Counter<prometheus::U64>,
sent_requests: prometheus::Counter<prometheus::U64>,
received_responses: prometheus::CounterVec<prometheus::U64>,
- active_leaves_update: prometheus::Histogram,
- share: prometheus::Histogram,
network_bridge_update: prometheus::HistogramVec,
statements_unexpected: prometheus::CounterVec<prometheus::U64>,
created_message_size: prometheus::Gauge<prometheus::U64>,
+ // V1+
+ active_leaves_update: prometheus::Histogram,
+ share: prometheus::Histogram,
+ // V2+
+ peer_rate_limit_request_drop: prometheus::Counter<prometheus::U64>,
+ max_parallel_requests_reached: prometheus::Counter<prometheus::U64>,
}
/// Statement Distribution metrics.
@@ -114,6 +119,23 @@ impl Metrics {
metrics.created_message_size.set(size as u64);
}
}
+
+ /// Update sent dropped requests counter when request dropped because
+ /// of peer rate limit
+ pub fn on_request_dropped_peer_rate_limit(&self) {
+ if let Some(metrics) = &self.0 {
+ metrics.peer_rate_limit_request_drop.inc();
+ }
+ }
+
+ /// Update max parallel requests reached counter
+ /// This counter is updated when the maximum number of parallel requests is reached
+ /// and we are waiting for one of the requests to finish
+ pub fn on_max_parallel_requests_reached(&self) {
+ if let Some(metrics) = &self.0 {
+ metrics.max_parallel_requests_reached.inc();
+ }
+ }
}
impl metrics::Metrics for Metrics {
@@ -193,6 +215,20 @@ impl metrics::Metrics for Metrics {
))?,
registry,
)?,
+ peer_rate_limit_request_drop: prometheus::register(
+ prometheus::Counter::new(
+ "polkadot_parachain_statement_distribution_peer_rate_limit_request_drop_total",
+ "Number of statement distribution requests dropped because of the peer rate limiting.",
+ )?,
+ registry,
+ )?,
+ max_parallel_requests_reached: prometheus::register(
+ prometheus::Counter::new(
+ "polkadot_parachain_statement_distribution_max_parallel_requests_reached_total",
+ "Number of times the maximum number of parallel requests was reached.",
+ )?,
+ registry,
+ )?,
};
Ok(Metrics(Some(metrics)))
}
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index 118e34e92063..8579ac15cbc1 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -59,6 +59,8 @@ use sp_keystore::KeystorePtr;
use fatality::Nested;
use futures::{
channel::{mpsc, oneshot},
+ future::FutureExt,
+ select,
stream::FuturesUnordered,
SinkExt, StreamExt,
};
@@ -73,6 +75,7 @@ use std::{
use crate::{
error::{JfyiError, JfyiErrorResult},
+ metrics::Metrics,
LOG_TARGET,
};
use candidates::{BadAdvertisement, Candidates, PostConfirmation};
@@ -3423,35 +3426,61 @@ pub(crate) struct ResponderMessage {
pub(crate) async fn respond_task(
mut receiver: IncomingRequestReceiver<AttestedCandidateRequest>,
mut sender: mpsc::Sender<ResponderMessage>,
+ metrics: Metrics,
) {
let mut pending_out = FuturesUnordered::new();
+ let mut active_peers = HashSet::new();
+
loop {
- // Ensure we are not handling too many requests in parallel.
- if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
- // Wait for one to finish:
- pending_out.next().await;
- }
+ select! {
+ // New request
+ request_result = receiver.recv(|| vec![COST_INVALID_REQUEST]).fuse() => {
+ let request = match request_result.into_nested() {
+ Ok(Ok(v)) => v,
+ Err(fatal) => {
+ gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
+ return
+ },
+ Ok(Err(jfyi)) => {
+ gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
+ continue
+ },
+ };
- let req = match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() {
- Ok(Ok(v)) => v,
- Err(fatal) => {
- gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder");
- return
+ // If peer currently being served drop request
+ if active_peers.contains(&request.peer) {
+ gum::trace!(target: LOG_TARGET, "Peer already being served, dropping request");
+ metrics.on_request_dropped_peer_rate_limit();
+ continue
+ }
+
+ // If we are over parallel limit wait for one to finish
+ if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
+ gum::trace!(target: LOG_TARGET, "Over max parallel requests, waiting for one to finish");
+ metrics.on_max_parallel_requests_reached();
+ let (_, peer) = pending_out.select_next_some().await;
+ active_peers.remove(&peer);
+ }
+
+ // Start serving the request
+ let (pending_sent_tx, pending_sent_rx) = oneshot::channel();
+ let peer = request.peer;
+ if let Err(err) = sender
+ .feed(ResponderMessage { request, sent_feedback: pending_sent_tx })
+ .await
+ {
+ gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
+ return
+ }
+ let future_with_peer = pending_sent_rx.map(move |result| (result, peer));
+ pending_out.push(future_with_peer);
+ active_peers.insert(peer);
},
- Ok(Err(jfyi)) => {
- gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed");
- continue
+ // Request served/finished
+ result = pending_out.select_next_some() => {
+ let (_, peer) = result;
+ active_peers.remove(&peer);
},
- };
-
- let (pending_sent_tx, pending_sent_rx) = oneshot::channel();
- if let Err(err) = sender
- .feed(ResponderMessage { request: req, sent_feedback: pending_sent_tx })
- .await
- {
- gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder");
- return
}
- pending_out.push(pending_sent_rx);
}
}
diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs
index 1ed18ffd42a9..b8ed34d26c8a 100644
--- a/polkadot/node/network/statement-distribution/src/v2/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs
@@ -366,6 +366,7 @@ impl RequestManager {
id,
&props,
&peer_advertised,
+ &response_manager,
) {
None => continue,
Some(t) => t,
@@ -387,14 +388,17 @@ impl RequestManager {
);
let stored_id = id.clone();
- response_manager.push(Box::pin(async move {
- TaggedResponse {
- identifier: stored_id,
- requested_peer: target,
- props,
- response: response_fut.await,
- }
- }));
+ response_manager.push(
+ Box::pin(async move {
+ TaggedResponse {
+ identifier: stored_id,
+ requested_peer: target,
+ props,
+ response: response_fut.await,
+ }
+ }),
+ target,
+ );
entry.in_flight = true;
@@ -422,28 +426,35 @@ impl RequestManager {
/// A manager for pending responses.
pub struct ResponseManager {
pending_responses: FuturesUnordered<BoxFuture<'static, TaggedResponse>>,
+ active_peers: HashSet<PeerId>,
}
impl ResponseManager {
pub fn new() -> Self {
- Self { pending_responses: FuturesUnordered::new() }
+ Self { pending_responses: FuturesUnordered::new(), active_peers: HashSet::new() }
}
/// Await the next incoming response to a sent request, or immediately
/// return `None` if there are no pending responses.
pub async fn incoming(&mut self) -> Option<UnhandledResponse> {
- self.pending_responses
- .next()
- .await
- .map(|response| UnhandledResponse { response })
+ self.pending_responses.next().await.map(|response| {
+ self.active_peers.remove(&response.requested_peer);
+ UnhandledResponse { response }
+ })
}
fn len(&self) -> usize {
self.pending_responses.len()
}
- fn push(&mut self, response: BoxFuture<'static, TaggedResponse>) {
+ fn push(&mut self, response: BoxFuture<'static, TaggedResponse>, target: PeerId) {
self.pending_responses.push(response);
+ self.active_peers.insert(target);
+ }
+
+ /// Returns true if we are currently sending a request to the peer.
+ fn is_sending_to(&self, peer: &PeerId) -> bool {
+ self.active_peers.contains(peer)
}
}
@@ -471,10 +482,16 @@ fn find_request_target_with_update(
candidate_identifier: &CandidateIdentifier,
props: &RequestProperties,
peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option<StatementFilter>,
+ response_manager: &ResponseManager,
) -> Option<PeerId> {
let mut prune = Vec::new();
let mut target = None;
for (i, p) in known_by.iter().enumerate() {
+ // If we are already sending to that peer, skip for now
+ if response_manager.is_sending_to(p) {
+ continue
+ }
+
let mut filter = match peer_advertised(candidate_identifier, p) {
None => {
prune.push(i);
@@ -1002,7 +1019,8 @@ mod tests {
candidate_receipt.descriptor.persisted_validation_data_hash =
persisted_validation_data.hash();
let candidate = candidate_receipt.hash();
- let requested_peer = PeerId::random();
+ let requested_peer_1 = PeerId::random();
+ let requested_peer_2 = PeerId::random();
let identifier1 = request_manager
.get_or_insert(relay_parent, candidate, 1.into())
@@ -1010,14 +1028,14 @@ mod tests {
.clone();
request_manager
.get_or_insert(relay_parent, candidate, 1.into())
- .add_peer(requested_peer);
+ .add_peer(requested_peer_1);
let identifier2 = request_manager
.get_or_insert(relay_parent, candidate, 2.into())
.identifier
.clone();
request_manager
.get_or_insert(relay_parent, candidate, 2.into())
- .add_peer(requested_peer);
+ .add_peer(requested_peer_2);
assert_ne!(identifier1, identifier2);
assert_eq!(request_manager.requests.len(), 2);
@@ -1053,7 +1071,7 @@ mod tests {
let response = UnhandledResponse {
response: TaggedResponse {
identifier: identifier1,
- requested_peer,
+ requested_peer: requested_peer_1,
props: request_properties.clone(),
response: Ok(AttestedCandidateResponse {
candidate_receipt: candidate_receipt.clone(),
@@ -1076,13 +1094,13 @@ mod tests {
assert_eq!(
output,
ResponseValidationOutput {
- requested_peer,
+ requested_peer: requested_peer_1,
request_status: CandidateRequestStatus::Complete {
candidate: candidate_receipt.clone(),
persisted_validation_data: persisted_validation_data.clone(),
statements,
},
- reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)],
+ reputation_changes: vec![(requested_peer_1, BENEFIT_VALID_RESPONSE)],
}
);
}
@@ -1093,7 +1111,7 @@ mod tests {
let response = UnhandledResponse {
response: TaggedResponse {
identifier: identifier2,
- requested_peer,
+ requested_peer: requested_peer_2,
props: request_properties,
response: Ok(AttestedCandidateResponse {
candidate_receipt: candidate_receipt.clone(),
@@ -1115,12 +1133,14 @@ mod tests {
assert_eq!(
output,
ResponseValidationOutput {
- requested_peer,
+ requested_peer: requested_peer_2,
request_status: CandidateRequestStatus::Outdated,
reputation_changes: vec![],
}
);
}
+
+ assert_eq!(request_manager.requests.len(), 0);
}
// Test case where we had a request in-flight and the request entry was garbage-collected on
@@ -1293,4 +1313,140 @@ mod tests {
assert_eq!(request_manager.requests.len(), 0);
assert_eq!(request_manager.by_priority.len(), 0);
}
+
+ // Test case where we queue 2 requests to be sent to the same peer and 1 request to another
+ // peer. Same peer requests should be served one at a time but they should not block the other
+ // peer request.
+ #[test]
+ fn rate_limit_requests_to_same_peer() {
+ let mut request_manager = RequestManager::new();
+ let mut response_manager = ResponseManager::new();
+
+ let relay_parent = Hash::from_low_u64_le(1);
+
+ // Create 3 candidates
+ let mut candidate_receipt_1 = test_helpers::dummy_committed_candidate_receipt(relay_parent);
+ let persisted_validation_data_1 = dummy_pvd();
+ candidate_receipt_1.descriptor.persisted_validation_data_hash =
+ persisted_validation_data_1.hash();
+ let candidate_1 = candidate_receipt_1.hash();
+
+ let mut candidate_receipt_2 = test_helpers::dummy_committed_candidate_receipt(relay_parent);
+ let persisted_validation_data_2 = dummy_pvd();
+ candidate_receipt_2.descriptor.persisted_validation_data_hash =
+ persisted_validation_data_2.hash();
+ let candidate_2 = candidate_receipt_2.hash();
+
+ let mut candidate_receipt_3 = test_helpers::dummy_committed_candidate_receipt(relay_parent);
+ let persisted_validation_data_3 = dummy_pvd();
+ candidate_receipt_3.descriptor.persisted_validation_data_hash =
+ persisted_validation_data_3.hash();
+ let candidate_3 = candidate_receipt_3.hash();
+
+ // Create 2 peers
+ let requested_peer_1 = PeerId::random();
+ let requested_peer_2 = PeerId::random();
+
+ let group_size = 3;
+ let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)];
+ let unwanted_mask = StatementFilter::blank(group_size);
+ let disabled_mask: BitVec<u8, Lsb0> = Default::default();
+ let request_properties = RequestProperties { unwanted_mask, backing_threshold: None };
+ let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone());
+ let peer_advertised =
+ |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size));
+
+ // Add request for candidate 1 from peer 1
+ let identifier1 = request_manager
+ .get_or_insert(relay_parent, candidate_1, 1.into())
+ .identifier
+ .clone();
+ request_manager
+ .get_or_insert(relay_parent, candidate_1, 1.into())
+ .add_peer(requested_peer_1);
+
+ // Add request for candidate 3 from peer 2 (this one can be served in parallel)
+ let _identifier3 = request_manager
+ .get_or_insert(relay_parent, candidate_3, 1.into())
+ .identifier
+ .clone();
+ request_manager
+ .get_or_insert(relay_parent, candidate_3, 1.into())
+ .add_peer(requested_peer_2);
+
+ // Successfully dispatch request for candidate 1 from peer 1 and candidate 3 from peer 2
+ for _ in 0..2 {
+ let outgoing =
+ request_manager.next_request(&mut response_manager, request_props, peer_advertised);
+ assert!(outgoing.is_some());
+ }
+ assert_eq!(response_manager.active_peers.len(), 2);
+ assert!(response_manager.is_sending_to(&requested_peer_1));
+ assert!(response_manager.is_sending_to(&requested_peer_2));
+ assert_eq!(request_manager.requests.len(), 2);
+
+ // Add request for candidate 2 from peer 1
+ let _identifier2 = request_manager
+ .get_or_insert(relay_parent, candidate_2, 1.into())
+ .identifier
+ .clone();
+ request_manager
+ .get_or_insert(relay_parent, candidate_2, 1.into())
+ .add_peer(requested_peer_1);
+
+ // Do not dispatch the request for the second candidate from peer 1 (already serving that
+ // peer)
+ let outgoing =
+ request_manager.next_request(&mut response_manager, request_props, peer_advertised);
+ assert!(outgoing.is_none());
+ assert_eq!(response_manager.active_peers.len(), 2);
+ assert!(response_manager.is_sending_to(&requested_peer_1));
+ assert!(response_manager.is_sending_to(&requested_peer_2));
+ assert_eq!(request_manager.requests.len(), 3);
+
+ // Manually mark response received (response future resolved)
+ response_manager.active_peers.remove(&requested_peer_1);
+ response_manager.pending_responses = FuturesUnordered::new();
+
+ // Validate first response (candidate 1 from peer 1)
+ {
+ let statements = vec![];
+ let response = UnhandledResponse {
+ response: TaggedResponse {
+ identifier: identifier1,
+ requested_peer: requested_peer_1,
+ props: request_properties.clone(),
+ response: Ok(AttestedCandidateResponse {
+ candidate_receipt: candidate_receipt_1.clone(),
+ persisted_validation_data: persisted_validation_data_1.clone(),
+ statements,
+ }),
+ },
+ };
+ let validator_key_lookup = |_v| None;
+ let allowed_para_lookup = |_para, _g_index| true;
+ let _output = response.validate_response(
+ &mut request_manager,
+ group,
+ 0,
+ validator_key_lookup,
+ allowed_para_lookup,
+ disabled_mask.clone(),
+ );
+
+ // First request served successfully
+ assert_eq!(request_manager.requests.len(), 2);
+ assert_eq!(response_manager.active_peers.len(), 1);
+ assert!(response_manager.is_sending_to(&requested_peer_2));
+ }
+
+ // Check if the request that was ignored previously will be served now
+ let outgoing =
+ request_manager.next_request(&mut response_manager, request_props, peer_advertised);
+ assert!(outgoing.is_some());
+ assert_eq!(response_manager.active_peers.len(), 2);
+ assert!(response_manager.is_sending_to(&requested_peer_1));
+ assert!(response_manager.is_sending_to(&requested_peer_2));
+ assert_eq!(request_manager.requests.len(), 2);
+ }
}
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
index 8cf139802148..c9de42d2c468 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
@@ -1891,7 +1891,7 @@ fn local_node_sanity_checks_incoming_requests() {
let mask = StatementFilter::blank(state.config.group_size + 1);
let response = state
.send_request(
- peer_c,
+ peer_a,
request_v2::AttestedCandidateRequest { candidate_hash: candidate.hash(), mask },
)
.await
diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs
index c6cfb7a27d04..89613040dca1 100644
--- a/polkadot/node/service/src/fake_runtime_api.rs
+++ b/polkadot/node/service/src/fake_runtime_api.rs
@@ -242,7 +242,7 @@ sp_api::impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
- _: beefy_primitives::EquivocationProof<
+ _: beefy_primitives::DoubleVotingProof<
BlockNumber,
BeefyId,
BeefySignature,
diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs
index b5dad6c6e864..34923897f02b 100644
--- a/polkadot/runtime/parachains/src/configuration.rs
+++ b/polkadot/runtime/parachains/src/configuration.rs
@@ -30,7 +30,7 @@ use primitives::{
NodeFeatures, SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE,
MAX_POV_SIZE, ON_DEMAND_MAX_QUEUE_MAX_SIZE,
};
-use sp_runtime::{traits::Zero, Perbill};
+use sp_runtime::{traits::Zero, Perbill, Percent};
use sp_std::prelude::*;
#[cfg(test)]
@@ -1460,3 +1460,16 @@ impl Pallet {
Ok(())
}
}
+
+/// The implementation of `Get<(u32, u32)>` which reads `ActiveConfig` and returns `P` percent of
+/// `hrmp_channel_max_message_size` / `hrmp_channel_max_capacity`.
+pub struct ActiveConfigHrmpChannelSizeAndCapacityRatio<T, P>(sp_std::marker::PhantomData<(T, P)>);
+impl<T: Config, P: Get<Percent>> Get<(u32, u32)>
+ for ActiveConfigHrmpChannelSizeAndCapacityRatio<T, P>
+{
+ fn get() -> (u32, u32) {
+ let config = ActiveConfig::::get();
+ let percent = P::get();
+ (percent * config.hrmp_channel_max_message_size, percent * config.hrmp_channel_max_capacity)
+ }
+}
diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs
index 239b466fde39..64bbb8481fc1 100644
--- a/polkadot/runtime/parachains/src/configuration/tests.rs
+++ b/polkadot/runtime/parachains/src/configuration/tests.rs
@@ -17,7 +17,7 @@
use super::*;
use crate::{
configuration,
- mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test},
+ mock::{new_test_ext, Configuration, MockGenesisConfig, ParasShared, RuntimeOrigin, Test},
};
use bitvec::{bitvec, prelude::Lsb0};
use frame_support::{assert_err, assert_noop, assert_ok};
@@ -547,3 +547,51 @@ fn verify_externally_accessible() {
);
});
}
+
+#[test]
+fn active_config_hrmp_channel_size_and_capacity_ratio_works() {
+ frame_support::parameter_types! {
+ pub Ratio100: Percent = Percent::from_percent(100);
+ pub Ratio50: Percent = Percent::from_percent(50);
+ }
+
+ let mut genesis: MockGenesisConfig = Default::default();
+ genesis.configuration.config.hrmp_channel_max_message_size = 1024;
+ genesis.configuration.config.hrmp_channel_max_capacity = 100;
+
+ new_test_ext(genesis).execute_with(|| {
+ let active_config = configuration::ActiveConfig::::get();
+ assert_eq!(active_config.hrmp_channel_max_message_size, 1024);
+ assert_eq!(active_config.hrmp_channel_max_capacity, 100);
+
+ assert_eq!(
+ ActiveConfigHrmpChannelSizeAndCapacityRatio::<Test, Ratio100>::get(),
+ (1024, 100)
+ );
+ assert_eq!(ActiveConfigHrmpChannelSizeAndCapacityRatio::<Test, Ratio50>::get(), (512, 50));
+
+ // change ActiveConfig
+ assert_ok!(Configuration::set_hrmp_channel_max_message_size(
+ RuntimeOrigin::root(),
+ active_config.hrmp_channel_max_message_size * 4
+ ));
+ assert_ok!(Configuration::set_hrmp_channel_max_capacity(
+ RuntimeOrigin::root(),
+ active_config.hrmp_channel_max_capacity * 4
+ ));
+ on_new_session(1);
+ on_new_session(2);
+ let active_config = configuration::ActiveConfig::::get();
+ assert_eq!(active_config.hrmp_channel_max_message_size, 4096);
+ assert_eq!(active_config.hrmp_channel_max_capacity, 400);
+
+ assert_eq!(
+ ActiveConfigHrmpChannelSizeAndCapacityRatio::<Test, Ratio100>::get(),
+ (4096, 400)
+ );
+ assert_eq!(
+ ActiveConfigHrmpChannelSizeAndCapacityRatio::<Test, Ratio50>::get(),
+ (2048, 200)
+ );
+ })
+}
diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs
index 1cfe9adfe13d..a52d800ad386 100644
--- a/polkadot/runtime/rococo/src/lib.rs
+++ b/polkadot/runtime/rococo/src/lib.rs
@@ -53,6 +53,7 @@ use runtime_common::{
use runtime_parachains::{
assigner_coretime as parachains_assigner_coretime,
assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration,
+ configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
coretime, disputes as parachains_disputes,
disputes::slashing as parachains_slashing,
dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
@@ -1033,7 +1034,7 @@ impl pallet_message_queue::Config for Runtime {
impl parachains_dmp::Config for Runtime {}
parameter_types! {
- pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500);
+ pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100);
}
impl parachains_hrmp::Config for Runtime {
@@ -1041,7 +1042,10 @@ impl parachains_hrmp::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type ChannelManager = EnsureRoot;
type Currency = Balances;
- type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem;
+ type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio<
+ Runtime,
+ HrmpChannelSizeAndCapacityWithSystemRatio,
+ >;
type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo;
}
@@ -2018,7 +2022,7 @@ sp_api::impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
- equivocation_proof: beefy_primitives::EquivocationProof<
+ equivocation_proof: beefy_primitives::DoubleVotingProof<
BlockNumber,
BeefyId,
BeefySignature,
diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs
index 97b84155b36a..1d83e97ef0e5 100644
--- a/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs
+++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_hrmp.rs
@@ -16,25 +16,26 @@
//! Autogenerated weights for `runtime_parachains::hrmp`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=rococo-dev
// --steps=50
// --repeat=20
-// --pallet=runtime_parachains::hrmp
// --extrinsic=*
-// --execution=wasm
// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/runtime_parachains_hrmp.rs
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::hrmp
+// --chain=rococo-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -47,105 +48,97 @@ use core::marker::PhantomData;
/// Weight functions for `runtime_parachains::hrmp`.
pub struct WeightInfo(PhantomData);
impl runtime_parachains::hrmp::WeightInfo for WeightInfo {
- /// Storage: Paras ParaLifecycles (r:2 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_init_open_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `704`
- // Estimated: `6644`
- // Minimum execution time: 41_564_000 picoseconds.
- Weight::from_parts(42_048_000, 0)
- .saturating_add(Weight::from_parts(0, 6644))
- .saturating_add(T::DbWeight::get().reads(10))
+ // Measured: `488`
+ // Estimated: `3953`
+ // Minimum execution time: 34_034_000 picoseconds.
+ Weight::from_parts(35_191_000, 0)
+ .saturating_add(Weight::from_parts(0, 3953))
+ .saturating_add(T::DbWeight::get().reads(8))
.saturating_add(T::DbWeight::get().writes(5))
}
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:1 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_accept_open_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `936`
- // Estimated: `4401`
- // Minimum execution time: 43_570_000 picoseconds.
- Weight::from_parts(44_089_000, 0)
- .saturating_add(Weight::from_parts(0, 4401))
- .saturating_add(T::DbWeight::get().reads(7))
+ // Measured: `478`
+ // Estimated: `3943`
+ // Minimum execution time: 30_115_000 picoseconds.
+ Weight::from_parts(31_060_000, 0)
+ .saturating_add(Weight::from_parts(0, 3943))
+ .saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(4))
}
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_close_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `807`
- // Estimated: `4272`
- // Minimum execution time: 36_594_000 picoseconds.
- Weight::from_parts(37_090_000, 0)
- .saturating_add(Weight::from_parts(0, 4272))
- .saturating_add(T::DbWeight::get().reads(6))
+ // Measured: `591`
+ // Estimated: `4056`
+ // Minimum execution time: 30_982_000 picoseconds.
+ Weight::from_parts(32_034_000, 0)
+ .saturating_add(Weight::from_parts(0, 4056))
+ .saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(4))
}
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:254 w:254)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannelContents (r:0 w:254)
- /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:254 w:254)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254)
+ /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `i` is `[0, 127]`.
/// The range of component `e` is `[0, 127]`.
fn force_clean_hrmp(i: u32, e: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `264 + e * (100 ±0) + i * (100 ±0)`
- // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)`
- // Minimum execution time: 1_085_140_000 picoseconds.
- Weight::from_parts(1_100_901_000, 0)
- .saturating_add(Weight::from_parts(0, 3726))
- // Standard Error: 98_982
- .saturating_add(Weight::from_parts(3_229_112, 0).saturating_mul(i.into()))
- // Standard Error: 98_982
- .saturating_add(Weight::from_parts(3_210_944, 0).saturating_mul(e.into()))
+ // Measured: `297 + e * (100 ±0) + i * (100 ±0)`
+ // Estimated: `3759 + e * (2575 ±0) + i * (2575 ±0)`
+ // Minimum execution time: 1_158_665_000 picoseconds.
+ Weight::from_parts(1_164_378_000, 0)
+ .saturating_add(Weight::from_parts(0, 3759))
+ // Standard Error: 103_726
+ .saturating_add(Weight::from_parts(3_444_855, 0).saturating_mul(i.into()))
+ // Standard Error: 103_726
+ .saturating_add(Weight::from_parts(3_527_628, 0).saturating_mul(e.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into())))
.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into())))
@@ -155,139 +148,139 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
.saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into()))
.saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into()))
}
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:256 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:256 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn force_process_hrmp_open(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `779 + c * (136 ±0)`
- // Estimated: `2234 + c * (5086 ±0)`
- // Minimum execution time: 10_497_000 picoseconds.
- Weight::from_parts(6_987_455, 0)
- .saturating_add(Weight::from_parts(0, 2234))
- // Standard Error: 18_540
- .saturating_add(Weight::from_parts(18_788_534, 0).saturating_mul(c.into()))
- .saturating_add(T::DbWeight::get().reads(2))
+ // Measured: `525 + c * (136 ±0)`
+ // Estimated: `1980 + c * (5086 ±0)`
+ // Minimum execution time: 5_870_000 picoseconds.
+ Weight::from_parts(2_363_864, 0)
+ .saturating_add(Weight::from_parts(0, 1980))
+ // Standard Error: 16_657
+ .saturating_add(Weight::from_parts(20_507_232, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannelContents (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn force_process_hrmp_close(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `335 + c * (124 ±0)`
- // Estimated: `1795 + c * (2600 ±0)`
- // Minimum execution time: 6_575_000 picoseconds.
- Weight::from_parts(1_228_642, 0)
- .saturating_add(Weight::from_parts(0, 1795))
- // Standard Error: 14_826
- .saturating_add(Weight::from_parts(11_604_038, 0).saturating_mul(c.into()))
+ // Measured: `368 + c * (124 ±0)`
+ // Estimated: `1828 + c * (2600 ±0)`
+ // Minimum execution time: 4_766_000 picoseconds.
+ Weight::from_parts(4_988_812, 0)
+ .saturating_add(Weight::from_parts(0, 1828))
+ // Standard Error: 10_606
+ .saturating_add(Weight::from_parts(12_579_429, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn hrmp_cancel_open_request(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `1026 + c * (13 ±0)`
- // Estimated: `4295 + c * (15 ±0)`
- // Minimum execution time: 22_301_000 picoseconds.
- Weight::from_parts(26_131_473, 0)
- .saturating_add(Weight::from_parts(0, 4295))
- // Standard Error: 830
- .saturating_add(Weight::from_parts(49_448, 0).saturating_mul(c.into()))
+ // Measured: `1059 + c * (13 ±0)`
+ // Estimated: `4328 + c * (15 ±0)`
+ // Minimum execution time: 17_228_000 picoseconds.
+ Weight::from_parts(27_236_563, 0)
+ .saturating_add(Weight::from_parts(0, 4328))
+ // Standard Error: 2_419
+ .saturating_add(Weight::from_parts(102_107, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(3))
.saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn clean_open_channel_requests(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `243 + c * (63 ±0)`
- // Estimated: `1722 + c * (2538 ±0)`
- // Minimum execution time: 5_234_000 picoseconds.
- Weight::from_parts(7_350_270, 0)
- .saturating_add(Weight::from_parts(0, 1722))
- // Standard Error: 3_105
- .saturating_add(Weight::from_parts(2_981_935, 0).saturating_mul(c.into()))
+ // Measured: `276 + c * (63 ±0)`
+ // Estimated: `1755 + c * (2538 ±0)`
+ // Minimum execution time: 3_549_000 picoseconds.
+ Weight::from_parts(5_799_542, 0)
+ .saturating_add(Weight::from_parts(0, 1755))
+ // Standard Error: 3_025
+ .saturating_add(Weight::from_parts(3_173_294, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into()))
}
- /// Storage: Paras ParaLifecycles (r:2 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Configuration ActiveConfig (r:1 w:0)
- /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:2 w:2)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- fn force_open_hrmp_channel(_c: u32, ) -> Weight {
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// The range of component `c` is `[0, 1]`.
+ fn force_open_hrmp_channel(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `704`
- // Estimated: `6644`
- // Minimum execution time: 55_611_000 picoseconds.
- Weight::from_parts(56_488_000, 0)
- .saturating_add(Weight::from_parts(0, 6644))
- .saturating_add(T::DbWeight::get().reads(14))
+ // Measured: `488 + c * (235 ±0)`
+ // Estimated: `6428 + c * (235 ±0)`
+ // Minimum execution time: 48_392_000 picoseconds.
+ Weight::from_parts(50_509_977, 0)
+ .saturating_add(Weight::from_parts(0, 6428))
+ // Standard Error: 133_658
+ .saturating_add(Weight::from_parts(10_215_322, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(12))
.saturating_add(T::DbWeight::get().writes(8))
+ .saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into()))
}
/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -311,11 +304,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn establish_system_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `417`
- // Estimated: `6357`
- // Minimum execution time: 629_674_000 picoseconds.
- Weight::from_parts(640_174_000, 0)
- .saturating_add(Weight::from_parts(0, 6357))
+ // Measured: `488`
+ // Estimated: `6428`
+ // Minimum execution time: 48_465_000 picoseconds.
+ Weight::from_parts(50_433_000, 0)
+ .saturating_add(Weight::from_parts(0, 6428))
.saturating_add(T::DbWeight::get().reads(12))
.saturating_add(T::DbWeight::get().writes(8))
}
@@ -323,22 +316,42 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
/// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn poke_channel_deposits() -> Weight {
// Proof Size summary in bytes:
- // Measured: `263`
- // Estimated: `3728`
- // Minimum execution time: 173_371_000 picoseconds.
- Weight::from_parts(175_860_000, 0)
- .saturating_add(Weight::from_parts(0, 3728))
+ // Measured: `296`
+ // Estimated: `3761`
+ // Minimum execution time: 11_835_000 picoseconds.
+ Weight::from_parts(12_380_000, 0)
+ .saturating_add(Weight::from_parts(0, 3761))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
+ /// Storage: `Paras::ParaLifecycles` (r:2 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn establish_channel_with_system() -> Weight {
// Proof Size summary in bytes:
- // Measured: `417`
- // Estimated: `6357`
- // Minimum execution time: 629_674_000 picoseconds.
- Weight::from_parts(640_174_000, 0)
- .saturating_add(Weight::from_parts(0, 6357))
- .saturating_add(T::DbWeight::get().reads(12))
- .saturating_add(T::DbWeight::get().writes(8))
+ // Measured: `488`
+ // Estimated: `6428`
+ // Minimum execution time: 79_633_000 picoseconds.
+ Weight::from_parts(80_846_000, 0)
+ .saturating_add(Weight::from_parts(0, 6428))
+ .saturating_add(T::DbWeight::get().reads(19))
+ .saturating_add(T::DbWeight::get().writes(11))
}
}
diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs
index d0f1ff0035fc..b56e2f52f5f6 100644
--- a/polkadot/runtime/test-runtime/src/lib.rs
+++ b/polkadot/runtime/test-runtime/src/lib.rs
@@ -29,7 +29,9 @@ use sp_std::{
use polkadot_runtime_parachains::{
assigner_parachains as parachains_assigner_parachains,
- configuration as parachains_configuration, disputes as parachains_disputes,
+ configuration as parachains_configuration,
+ configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
+ disputes as parachains_disputes,
disputes::slashing as parachains_slashing,
dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
initializer as parachains_initializer, origin as parachains_origin, paras as parachains_paras,
@@ -78,7 +80,7 @@ use sp_runtime::{
SaturatedConversion, StaticLookup, Verify,
},
transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
- ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill,
+ ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent,
};
use sp_staking::SessionIndex;
#[cfg(any(feature = "std", test))]
@@ -557,7 +559,7 @@ impl parachains_dmp::Config for Runtime {}
parameter_types! {
pub const FirstMessageFactorPercent: u64 = 100;
- pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (51200, 500);
+ pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100);
}
impl parachains_hrmp::Config for Runtime {
@@ -565,7 +567,10 @@ impl parachains_hrmp::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type ChannelManager = frame_system::EnsureRoot;
type Currency = Balances;
- type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem;
+ type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio<
+ Runtime,
+ HrmpChannelSizeAndCapacityWithSystemRatio,
+ >;
type WeightInfo = parachains_hrmp::TestWeightInfo;
}
@@ -1009,7 +1014,7 @@ sp_api::impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
- _equivocation_proof: beefy_primitives::EquivocationProof<
+ _equivocation_proof: beefy_primitives::DoubleVotingProof<
BlockNumber,
BeefyId,
BeefySignature,
diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index de961bb4c398..f51233cabf06 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -68,6 +68,7 @@ use runtime_common::{
use runtime_parachains::{
assigner_coretime as parachains_assigner_coretime,
assigner_on_demand as parachains_assigner_on_demand, configuration as parachains_configuration,
+ configuration::ActiveConfigHrmpChannelSizeAndCapacityRatio,
coretime, disputes as parachains_disputes,
disputes::slashing as parachains_slashing,
dmp as parachains_dmp, hrmp as parachains_hrmp, inclusion as parachains_inclusion,
@@ -1163,7 +1164,7 @@ impl pallet_message_queue::Config for Runtime {
impl parachains_dmp::Config for Runtime {}
parameter_types! {
- pub const DefaultChannelSizeAndCapacityWithSystem: (u32, u32) = (4096, 4);
+ pub const HrmpChannelSizeAndCapacityWithSystemRatio: Percent = Percent::from_percent(100);
}
impl parachains_hrmp::Config for Runtime {
@@ -1171,7 +1172,10 @@ impl parachains_hrmp::Config for Runtime {
type RuntimeEvent = RuntimeEvent;
type ChannelManager = EnsureRoot;
type Currency = Balances;
- type DefaultChannelSizeAndCapacityWithSystem = DefaultChannelSizeAndCapacityWithSystem;
+ type DefaultChannelSizeAndCapacityWithSystem = ActiveConfigHrmpChannelSizeAndCapacityRatio<
+ Runtime,
+ HrmpChannelSizeAndCapacityWithSystemRatio,
+ >;
type WeightInfo = weights::runtime_parachains_hrmp::WeightInfo;
}
@@ -1966,7 +1970,7 @@ sp_api::impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
- equivocation_proof: beefy_primitives::EquivocationProof<
+ equivocation_proof: beefy_primitives::DoubleVotingProof<
BlockNumber,
BeefyId,
BeefySignature,
diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
index 3d2ab827b8fd..529bdf761055 100644
--- a/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
+++ b/polkadot/runtime/westend/src/weights/runtime_parachains_hrmp.rs
@@ -16,28 +16,26 @@
//! Autogenerated weights for `runtime_parachains::hrmp`
//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-04-30, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=westend-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=runtime_parachains::hrmp
// --extrinsic=*
-// --execution=wasm
// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/runtime_parachains_hrmp.rs
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=runtime_parachains::hrmp
+// --chain=westend-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -50,99 +48,97 @@ use core::marker::PhantomData;
/// Weight functions for `runtime_parachains::hrmp`.
pub struct WeightInfo(PhantomData);
impl runtime_parachains::hrmp::WeightInfo for WeightInfo {
- /// Storage: Paras ParaLifecycles (r:2 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_init_open_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `307`
- // Estimated: `6247`
- // Minimum execution time: 35_676_000 picoseconds.
- Weight::from_parts(36_608_000, 0)
- .saturating_add(Weight::from_parts(0, 6247))
- .saturating_add(T::DbWeight::get().reads(9))
+ // Measured: `455`
+ // Estimated: `3920`
+ // Minimum execution time: 32_195_000 picoseconds.
+ Weight::from_parts(33_340_000, 0)
+ .saturating_add(Weight::from_parts(0, 3920))
+ .saturating_add(T::DbWeight::get().reads(8))
.saturating_add(T::DbWeight::get().writes(5))
}
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:1 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_accept_open_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `421`
- // Estimated: `3886`
- // Minimum execution time: 32_773_000 picoseconds.
- Weight::from_parts(33_563_000, 0)
- .saturating_add(Weight::from_parts(0, 3886))
- .saturating_add(T::DbWeight::get().reads(6))
+ // Measured: `445`
+ // Estimated: `3910`
+ // Minimum execution time: 28_644_000 picoseconds.
+ Weight::from_parts(29_581_000, 0)
+ .saturating_add(Weight::from_parts(0, 3910))
+ .saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(4))
}
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn hrmp_close_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `238`
- // Estimated: `3703`
- // Minimum execution time: 28_134_000 picoseconds.
- Weight::from_parts(29_236_000, 0)
- .saturating_add(Weight::from_parts(0, 3703))
+ // Measured: `558`
+ // Estimated: `4023`
+ // Minimum execution time: 31_824_000 picoseconds.
+ Weight::from_parts(33_207_000, 0)
+ .saturating_add(Weight::from_parts(0, 4023))
.saturating_add(T::DbWeight::get().reads(5))
.saturating_add(T::DbWeight::get().writes(4))
}
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:254 w:254)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:0 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannelContents (r:0 w:254)
- /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:0 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:254 w:254)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:0 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:254)
+ /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:0 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `i` is `[0, 127]`.
/// The range of component `e` is `[0, 127]`.
fn force_clean_hrmp(i: u32, e: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `158 + e * (100 ±0) + i * (100 ±0)`
- // Estimated: `3620 + e * (2575 ±0) + i * (2575 ±0)`
- // Minimum execution time: 1_217_145_000 picoseconds.
- Weight::from_parts(1_251_187_000, 0)
- .saturating_add(Weight::from_parts(0, 3620))
- // Standard Error: 118_884
- .saturating_add(Weight::from_parts(4_002_678, 0).saturating_mul(i.into()))
- // Standard Error: 118_884
- .saturating_add(Weight::from_parts(3_641_596, 0).saturating_mul(e.into()))
+ // Measured: `264 + e * (100 ±0) + i * (100 ±0)`
+ // Estimated: `3726 + e * (2575 ±0) + i * (2575 ±0)`
+ // Minimum execution time: 1_213_331_000 picoseconds.
+ Weight::from_parts(1_217_120_000, 0)
+ .saturating_add(Weight::from_parts(0, 3726))
+ // Standard Error: 108_190
+ .saturating_add(Weight::from_parts(3_485_701, 0).saturating_mul(i.into()))
+ // Standard Error: 108_190
+ .saturating_add(Weight::from_parts(3_564_287, 0).saturating_mul(e.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(i.into())))
.saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(e.into())))
@@ -152,135 +148,139 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
.saturating_add(Weight::from_parts(0, 2575).saturating_mul(e.into()))
.saturating_add(Weight::from_parts(0, 2575).saturating_mul(i.into()))
}
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Paras ParaLifecycles (r:256 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:256 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn force_process_hrmp_open(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `386 + c * (136 ±0)`
- // Estimated: `1841 + c * (5086 ±0)`
- // Minimum execution time: 6_277_000 picoseconds.
- Weight::from_parts(6_357_000, 0)
- .saturating_add(Weight::from_parts(0, 1841))
- // Standard Error: 41_189
- .saturating_add(Weight::from_parts(22_159_709, 0).saturating_mul(c.into()))
+ // Measured: `492 + c * (136 ±0)`
+ // Estimated: `1947 + c * (5086 ±0)`
+ // Minimum execution time: 6_040_000 picoseconds.
+ Weight::from_parts(5_644_307, 0)
+ .saturating_add(Weight::from_parts(0, 1947))
+ // Standard Error: 12_852
+ .saturating_add(Weight::from_parts(21_031_626, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((7_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 5086).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpCloseChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpCloseChannelRequests (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpCloseChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannelContents (r:0 w:128)
- /// Proof Skipped: Hrmp HrmpChannelContents (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpCloseChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpCloseChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpCloseChannelRequests` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpCloseChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannelContents` (r:0 w:128)
+ /// Proof: `Hrmp::HrmpChannelContents` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn force_process_hrmp_close(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `229 + c * (124 ±0)`
- // Estimated: `1689 + c * (2600 ±0)`
- // Minimum execution time: 5_070_000 picoseconds.
- Weight::from_parts(5_225_000, 0)
- .saturating_add(Weight::from_parts(0, 1689))
- // Standard Error: 24_173
- .saturating_add(Weight::from_parts(13_645_307, 0).saturating_mul(c.into()))
+ // Measured: `335 + c * (124 ±0)`
+ // Estimated: `1795 + c * (2600 ±0)`
+ // Minimum execution time: 4_950_000 picoseconds.
+ Weight::from_parts(5_215_558, 0)
+ .saturating_add(Weight::from_parts(0, 1795))
+ // Standard Error: 9_231
+ .saturating_add(Weight::from_parts(12_770_147, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((5_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 2600).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn hrmp_cancel_open_request(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `920 + c * (13 ±0)`
- // Estimated: `4189 + c * (15 ±0)`
- // Minimum execution time: 20_449_000 picoseconds.
- Weight::from_parts(30_861_799, 0)
- .saturating_add(Weight::from_parts(0, 4189))
- // Standard Error: 6_642
- .saturating_add(Weight::from_parts(236_293, 0).saturating_mul(c.into()))
+ // Measured: `1026 + c * (13 ±0)`
+ // Estimated: `4295 + c * (15 ±0)`
+ // Minimum execution time: 17_550_000 picoseconds.
+ Weight::from_parts(25_522_933, 0)
+ .saturating_add(Weight::from_parts(0, 4295))
+ // Standard Error: 2_332
+ .saturating_add(Weight::from_parts(121_128, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(3))
.saturating_add(Weight::from_parts(0, 15).saturating_mul(c.into()))
}
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:128 w:128)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:128 w:128)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// The range of component `c` is `[0, 128]`.
fn clean_open_channel_requests(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `137 + c * (63 ±0)`
- // Estimated: `1616 + c * (2538 ±0)`
- // Minimum execution time: 3_911_000 picoseconds.
- Weight::from_parts(5_219_837, 0)
- .saturating_add(Weight::from_parts(0, 1616))
- // Standard Error: 10_219
- .saturating_add(Weight::from_parts(3_647_782, 0).saturating_mul(c.into()))
+ // Measured: `243 + c * (63 ±0)`
+ // Estimated: `1722 + c * (2538 ±0)`
+ // Minimum execution time: 3_782_000 picoseconds.
+ Weight::from_parts(5_263_610, 0)
+ .saturating_add(Weight::from_parts(0, 1722))
+ // Standard Error: 3_152
+ .saturating_add(Weight::from_parts(3_309_777, 0).saturating_mul(c.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
.saturating_add(T::DbWeight::get().writes(1))
.saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
.saturating_add(Weight::from_parts(0, 2538).saturating_mul(c.into()))
}
- /// Storage: Paras ParaLifecycles (r:2 w:0)
- /// Proof Skipped: Paras ParaLifecycles (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequests (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequests (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpChannels (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpChannels (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpEgressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpEgressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpOpenChannelRequestsList (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpOpenChannelRequestsList (max_values: Some(1), max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueues (r:2 w:2)
- /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured)
- /// Storage: Dmp DownwardMessageQueueHeads (r:2 w:2)
- /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpIngressChannelsIndex (r:1 w:0)
- /// Proof Skipped: Hrmp HrmpIngressChannelsIndex (max_values: None, max_size: None, mode: Measured)
- /// Storage: Hrmp HrmpAcceptedChannelRequestCount (r:1 w:1)
- /// Proof Skipped: Hrmp HrmpAcceptedChannelRequestCount (max_values: None, max_size: None, mode: Measured)
- fn force_open_hrmp_channel(_c: u32, ) -> Weight {
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Paras::ParaLifecycles` (r:1 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:1 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// The range of component `c` is `[0, 1]`.
+ fn force_open_hrmp_channel(c: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `307`
- // Estimated: `6247`
- // Minimum execution time: 50_870_000 picoseconds.
- Weight::from_parts(53_335_000, 0)
- .saturating_add(Weight::from_parts(0, 6247))
- .saturating_add(T::DbWeight::get().reads(13))
+ // Measured: `455 + c * (235 ±0)`
+ // Estimated: `6395 + c * (235 ±0)`
+ // Minimum execution time: 46_445_000 picoseconds.
+ Weight::from_parts(48_376_448, 0)
+ .saturating_add(Weight::from_parts(0, 6395))
+ // Standard Error: 130_148
+ .saturating_add(Weight::from_parts(13_606_551, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(12))
.saturating_add(T::DbWeight::get().writes(8))
+ .saturating_add(Weight::from_parts(0, 235).saturating_mul(c.into()))
}
/// Storage: `Paras::ParaLifecycles` (r:1 w:0)
/// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
@@ -304,11 +304,11 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
/// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn establish_system_channel() -> Weight {
// Proof Size summary in bytes:
- // Measured: `417`
- // Estimated: `6357`
- // Minimum execution time: 629_674_000 picoseconds.
- Weight::from_parts(640_174_000, 0)
- .saturating_add(Weight::from_parts(0, 6357))
+ // Measured: `455`
+ // Estimated: `6395`
+ // Minimum execution time: 46_563_000 picoseconds.
+ Weight::from_parts(48_015_000, 0)
+ .saturating_add(Weight::from_parts(0, 6395))
.saturating_add(T::DbWeight::get().reads(12))
.saturating_add(T::DbWeight::get().writes(8))
}
@@ -318,20 +318,40 @@ impl runtime_parachains::hrmp::WeightInfo for WeightInf
// Proof Size summary in bytes:
// Measured: `263`
// Estimated: `3728`
- // Minimum execution time: 173_371_000 picoseconds.
- Weight::from_parts(175_860_000, 0)
+ // Minimum execution time: 12_252_000 picoseconds.
+ Weight::from_parts(12_550_000, 0)
.saturating_add(Weight::from_parts(0, 3728))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
+ /// Storage: `Paras::ParaLifecycles` (r:2 w:0)
+ /// Proof: `Paras::ParaLifecycles` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequests` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpOpenChannelRequests` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpChannels` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpChannels` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpEgressChannelsIndex` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpEgressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestCount` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpOpenChannelRequestsList` (r:1 w:1)
+ /// Proof: `Hrmp::HrmpOpenChannelRequestsList` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueues` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Dmp::DownwardMessageQueueHeads` (r:2 w:2)
+ /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpIngressChannelsIndex` (r:2 w:0)
+ /// Proof: `Hrmp::HrmpIngressChannelsIndex` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `Hrmp::HrmpAcceptedChannelRequestCount` (r:2 w:2)
+ /// Proof: `Hrmp::HrmpAcceptedChannelRequestCount` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn establish_channel_with_system() -> Weight {
// Proof Size summary in bytes:
- // Measured: `417`
- // Estimated: `6357`
- // Minimum execution time: 629_674_000 picoseconds.
- Weight::from_parts(640_174_000, 0)
- .saturating_add(Weight::from_parts(0, 6357))
- .saturating_add(T::DbWeight::get().reads(12))
- .saturating_add(T::DbWeight::get().writes(8))
+ // Measured: `455`
+ // Estimated: `6395`
+ // Minimum execution time: 79_503_000 picoseconds.
+ Weight::from_parts(81_630_000, 0)
+ .saturating_add(Weight::from_parts(0, 6395))
+ .saturating_add(T::DbWeight::get().reads(19))
+ .saturating_add(T::DbWeight::get().writes(11))
}
}
diff --git a/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml
new file mode 100644
index 000000000000..14208425d62b
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.toml
@@ -0,0 +1,43 @@
+[settings]
+timeout = 1000
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config]
+ needed_approvals = 2
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+ max_validators_per_core = 5
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+default_command = "polkadot"
+
+[relaychain.default_resources]
+limits = { memory = "4G", cpu = "2" }
+requests = { memory = "2G", cpu = "1" }
+
+ [[relaychain.node_groups]]
+ name = "honest"
+ count = 4
+ args = ["-lparachain=debug,parachain::statement-distribution=trace"]
+
+ [[relaychain.nodes]]
+ image = "{{MALUS_IMAGE}}"
+ name = "malus"
+ command = "malus spam-statement-requests"
+ args = [ "--alice", "-lparachain=debug,MALUS=trace", "--spam-factor=1000" ]
+
+{% for id in range(2000,2001) %}
+[[parachains]]
+id = {{id}}
+ [parachains.collator]
+ image = "{{COL_IMAGE}}"
+ name = "collator"
+ command = "undying-collator"
+ args = ["-lparachain=debug"]
+{% endfor %}
+
+[types.Header]
+number = "u64"
+parent_hash = "Hash"
+post_state = "Hash"
diff --git a/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl
new file mode 100644
index 000000000000..9985dd24ee38
--- /dev/null
+++ b/polkadot/zombienet_tests/functional/0012-spam-statement-distribution-requests.zndsl
@@ -0,0 +1,27 @@
+Description: Test if parachains progress when group is getting spammed by statement distribution requests.
+Network: ./0012-spam-statement-distribution-requests.toml
+Creds: config
+
+# Check authority status and peers.
+malus: reports node_roles is 4
+honest: reports node_roles is 4
+
+# Ensure parachains are registered.
+honest: parachain 2000 is registered within 60 seconds
+
+# Ensure that malus is already attempting to DoS
+malus: log line contains "😈 Duplicating AttestedCandidateV2 request" within 90 seconds
+
+# Ensure parachains made progress.
+honest: parachain 2000 block height is at least 10 within 200 seconds
+
+# Ensure that honest nodes drop extra requests
+honest: log line contains "Peer already being served, dropping request" within 60 seconds
+
+# Check lag - approval
+honest: reports polkadot_parachain_approval_checking_finality_lag is 0
+
+# Check lag - dispute conclusion
+honest: reports polkadot_parachain_disputes_finality_lag is 0
+
+
diff --git a/prdoc/pr_3444.prdoc b/prdoc/pr_3444.prdoc
new file mode 100644
index 000000000000..3afb38106417
--- /dev/null
+++ b/prdoc/pr_3444.prdoc
@@ -0,0 +1,25 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Rate-limiting of statement distribution v2 requests to 1 per peer
+
+doc:
+ - audience: Node Dev
+ description: |
+ A new malicious node variant that sends duplicate statement
+ distribution messages to spam other peers.
+
+ - audience: Node Operator
+ description: |
+ Added rate-limiting in the statement distribution request-response
+ protocol. Requesters will not issue another request to a peer if one
+ is already pending with that peer and receiving nodes will reject
+ requests from peers that they are currently serving.
+ This should reduce the risk of validator-validator DoS attacks and
+ better load-balance statement distribution.
+
+crates:
+ - name: polkadot-test-malus
+ bump: minor
+ - name: polkadot-statement-distribution
+ bump: minor
diff --git a/prdoc/pr_4312.prdoc b/prdoc/pr_4312.prdoc
new file mode 100644
index 000000000000..d773edbd14de
--- /dev/null
+++ b/prdoc/pr_4312.prdoc
@@ -0,0 +1,19 @@
+title: Add `Deposited`/`Withdrawn` events for `pallet-assets`
+
+doc:
+ - audience: Runtime Dev
+ description: |
+ New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. Make sure
+ to cover those events on tests if necessary.
+ - audience: Runtime User
+ description: |
+ New events were added to `pallet-assets`: `Deposited` and `Withdrawn`. These indicate
+ a change in the balance of an account.
+
+crates:
+ - name: pallet-assets
+ bump: minor
+ - name: pallet-asset-tx-payment
+ bump: minor
+ - name: pallet-asset-conversion-tx-payment
+ bump: minor
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 5d8016532a5d..18b0d0c31a4d 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -3053,7 +3053,7 @@ impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
- equivocation_proof: sp_consensus_beefy::EquivocationProof<
+ equivocation_proof: sp_consensus_beefy::DoubleVotingProof<
BlockNumber,
BeefyId,
BeefySignature,
diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml
index 7b61b3c6c01f..435604a9473b 100644
--- a/substrate/client/consensus/beefy/Cargo.toml
+++ b/substrate/client/consensus/beefy/Cargo.toml
@@ -39,7 +39,6 @@ sp-consensus-beefy = { path = "../../../primitives/consensus/beefy" }
sp-core = { path = "../../../primitives/core" }
sp-crypto-hashing = { path = "../../../primitives/crypto/hashing" }
sp-keystore = { path = "../../../primitives/keystore" }
-sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" }
sp-runtime = { path = "../../../primitives/runtime" }
tokio = "1.37"
@@ -51,6 +50,7 @@ sc-block-builder = { path = "../../block-builder" }
sc-network-test = { path = "../../network/test" }
sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa" }
sp-keyring = { path = "../../../primitives/keyring" }
+sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" }
sp-tracing = { path = "../../../primitives/tracing" }
substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" }
diff --git a/substrate/client/consensus/beefy/src/fisherman.rs b/substrate/client/consensus/beefy/src/fisherman.rs
new file mode 100644
index 000000000000..a2b4c8f945d1
--- /dev/null
+++ b/substrate/client/consensus/beefy/src/fisherman.rs
@@ -0,0 +1,162 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{error::Error, keystore::BeefyKeystore, round::Rounds, LOG_TARGET};
+use log::{debug, error, warn};
+use sc_client_api::Backend;
+use sp_api::ProvideRuntimeApi;
+use sp_blockchain::HeaderBackend;
+use sp_consensus_beefy::{
+ check_equivocation_proof,
+ ecdsa_crypto::{AuthorityId, Signature},
+ BeefyApi, BeefySignatureHasher, DoubleVotingProof, OpaqueKeyOwnershipProof, ValidatorSetId,
+};
+use sp_runtime::{
+ generic::BlockId,
+ traits::{Block, NumberFor},
+};
+use std::{marker::PhantomData, sync::Arc};
+
+/// Helper struct containing the id and the key ownership proof for a validator.
+pub struct ProvedValidator<'a> {
+ pub id: &'a AuthorityId,
+ pub key_owner_proof: OpaqueKeyOwnershipProof,
+}
+
+/// Helper used to check and report equivocations.
+pub struct Fisherman<B, BE, RuntimeApi> {
+	backend: Arc<BE>,
+	runtime: Arc<RuntimeApi>,
+	key_store: Arc<BeefyKeystore<AuthorityId>>,
+
+	_phantom: PhantomData<B>,
+}
+
+impl<B: Block, BE: Backend<B>, RuntimeApi: ProvideRuntimeApi<B>> Fisherman<B, BE, RuntimeApi>
+where
+	RuntimeApi::Api: BeefyApi<B, AuthorityId>,
+{
+ pub fn new(
+		backend: Arc<BE>,
+		runtime: Arc<RuntimeApi>,
+		keystore: Arc<BeefyKeystore<AuthorityId>>,
+ ) -> Self {
+ Self { backend, runtime, key_store: keystore, _phantom: Default::default() }
+ }
+
+	fn prove_offenders<'a>(
+		&self,
+		at: BlockId<B>,
+		offender_ids: impl Iterator<Item = &'a AuthorityId>,
+		validator_set_id: ValidatorSetId,
+	) -> Result<Vec<ProvedValidator<'a>>, Error> {
+ let hash = match at {
+ BlockId::Hash(hash) => hash,
+ BlockId::Number(number) => self
+ .backend
+ .blockchain()
+ .expect_block_hash_from_id(&BlockId::Number(number))
+ .map_err(|err| {
+ Error::Backend(format!(
+ "Couldn't get hash for block #{:?} (error: {:?}). \
+ Skipping report for equivocation",
+ at, err
+ ))
+ })?,
+ };
+
+ let runtime_api = self.runtime.runtime_api();
+ let mut proved_offenders = vec![];
+ for offender_id in offender_ids {
+ match runtime_api.generate_key_ownership_proof(
+ hash,
+ validator_set_id,
+ offender_id.clone(),
+ ) {
+ Ok(Some(key_owner_proof)) => {
+ proved_offenders.push(ProvedValidator { id: offender_id, key_owner_proof });
+ },
+ Ok(None) => {
+ debug!(
+ target: LOG_TARGET,
+ "🥩 Equivocation offender {} not part of the authority set {}.",
+ offender_id, validator_set_id
+ );
+ },
+ Err(e) => {
+ error!(
+ target: LOG_TARGET,
+ "🥩 Error generating key ownership proof for equivocation offender {} \
+ in authority set {}: {}",
+ offender_id, validator_set_id, e
+ );
+ },
+ };
+ }
+
+ Ok(proved_offenders)
+ }
+
+ /// Report the given equivocation to the BEEFY runtime module. This method
+ /// generates a session membership proof of the offender and then submits an
+ /// extrinsic to report the equivocation. In particular, the session membership
+ /// proof must be generated at the block at which the given set was active which
+ /// isn't necessarily the best block if there are pending authority set changes.
+ pub fn report_double_voting(
+ &self,
+		proof: DoubleVotingProof<NumberFor<B>, AuthorityId, Signature>,
+		active_rounds: &Rounds<B>,
+ ) -> Result<(), Error> {
+ let (validators, validator_set_id) =
+ (active_rounds.validators(), active_rounds.validator_set_id());
+ let offender_id = proof.offender_id();
+
+ if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) {
+ debug!(target: LOG_TARGET, "🥩 Skipping report for bad equivocation {:?}", proof);
+ return Ok(())
+ }
+
+ if let Some(local_id) = self.key_store.authority_id(validators) {
+ if offender_id == &local_id {
+ warn!(target: LOG_TARGET, "🥩 Skipping report for own equivocation");
+ return Ok(())
+ }
+ }
+
+ let key_owner_proofs = self.prove_offenders(
+ BlockId::Number(*proof.round_number()),
+ vec![offender_id].into_iter(),
+ validator_set_id,
+ )?;
+
+ // submit equivocation report at **best** block
+ let best_block_hash = self.backend.blockchain().info().best_hash;
+ for ProvedValidator { key_owner_proof, .. } in key_owner_proofs {
+ self.runtime
+ .runtime_api()
+ .submit_report_equivocation_unsigned_extrinsic(
+ best_block_hash,
+ proof.clone(),
+ key_owner_proof,
+ )
+ .map_err(Error::RuntimeApi)?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs
index 2637481fbf3e..0e49839f0fd2 100644
--- a/substrate/client/consensus/beefy/src/lib.rs
+++ b/substrate/client/consensus/beefy/src/lib.rs
@@ -43,11 +43,10 @@ use sp_api::ProvideRuntimeApi;
use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend};
use sp_consensus::{Error as ConsensusError, SyncOracle};
use sp_consensus_beefy::{
- ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet,
+ ecdsa_crypto::AuthorityId, BeefyApi, ConsensusLog, PayloadProvider, ValidatorSet,
BEEFY_ENGINE_ID,
};
use sp_keystore::KeystorePtr;
-use sp_mmr_primitives::MmrApi;
use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero};
use std::{
collections::{BTreeMap, VecDeque},
@@ -69,6 +68,7 @@ pub mod justification;
use crate::{
communication::gossip::GossipValidator,
+ fisherman::Fisherman,
justification::BeefyVersionedFinalityProof,
keystore::BeefyKeystore,
metrics::VoterMetrics,
@@ -80,6 +80,7 @@ pub use communication::beefy_protocol_name::{
};
use sp_runtime::generic::OpaqueDigestItemId;
+mod fisherman;
#[cfg(test)]
mod tests;
@@ -305,14 +306,16 @@ where
pending_justifications: BTreeMap, BeefyVersionedFinalityProof>,
is_authority: bool,
) -> BeefyWorker {
+ let key_store = Arc::new(self.key_store);
BeefyWorker {
- backend: self.backend,
- runtime: self.runtime,
- key_store: self.key_store,
- metrics: self.metrics,
- persisted_state: self.persisted_state,
+ backend: self.backend.clone(),
+ runtime: self.runtime.clone(),
+ key_store: key_store.clone(),
payload_provider,
sync,
+ fisherman: Arc::new(Fisherman::new(self.backend, self.runtime, key_store)),
+ metrics: self.metrics,
+ persisted_state: self.persisted_state,
comms,
links,
pending_justifications,
@@ -487,7 +490,7 @@ pub async fn start_beefy_gadget(
 	C: Client<B, BE> + BlockBackend<B>,
 	P: PayloadProvider<B> + Clone,
 	R: ProvideRuntimeApi<B>,
-	R::Api: BeefyApi<B, AuthorityId> + MmrApi<B, MmrRootHash, NumberFor<B>>,
+	R::Api: BeefyApi<B, AuthorityId>,
N: GossipNetwork + NetworkRequest + Send + Sync + 'static,
S: GossipSyncing + SyncOracle + 'static,
{
diff --git a/substrate/client/consensus/beefy/src/round.rs b/substrate/client/consensus/beefy/src/round.rs
index 0045dc70c260..5dae80cb1830 100644
--- a/substrate/client/consensus/beefy/src/round.rs
+++ b/substrate/client/consensus/beefy/src/round.rs
@@ -22,7 +22,7 @@ use codec::{Decode, Encode};
use log::{debug, info};
use sp_consensus_beefy::{
ecdsa_crypto::{AuthorityId, Signature},
- Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage,
+ Commitment, DoubleVotingProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage,
};
use sp_runtime::traits::{Block, NumberFor};
use std::collections::BTreeMap;
@@ -61,7 +61,7 @@ pub fn threshold(authorities: usize) -> usize {
pub enum VoteImportResult {
Ok,
 	RoundConcluded(SignedCommitment<NumberFor<B>, Signature>),
-	Equivocation(EquivocationProof<NumberFor<B>, AuthorityId, Signature>),
+	DoubleVoting(DoubleVotingProof<NumberFor<B>, AuthorityId, Signature>),
Invalid,
Stale,
}
@@ -153,7 +153,7 @@ where
target: LOG_TARGET,
"🥩 detected equivocated vote: 1st: {:?}, 2nd: {:?}", previous_vote, vote
);
- return VoteImportResult::Equivocation(EquivocationProof {
+ return VoteImportResult::DoubleVoting(DoubleVotingProof {
first: previous_vote.clone(),
second: vote,
})
@@ -207,7 +207,7 @@ mod tests {
use sc_network_test::Block;
use sp_consensus_beefy::{
- known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, EquivocationProof, Payload,
+ known_payloads::MMR_ROOT_ID, test_utils::Keyring, Commitment, DoubleVotingProof, Payload,
SignedCommitment, ValidatorSet, VoteMessage,
};
@@ -494,7 +494,7 @@ mod tests {
let mut alice_vote2 = alice_vote1.clone();
alice_vote2.commitment = commitment2;
- let expected_result = VoteImportResult::Equivocation(EquivocationProof {
+ let expected_result = VoteImportResult::DoubleVoting(DoubleVotingProof {
first: alice_vote1.clone(),
second: alice_vote2.clone(),
});
diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs
index 9b13d1da6d7d..2bb145d660df 100644
--- a/substrate/client/consensus/beefy/src/tests.rs
+++ b/substrate/client/consensus/beefy/src/tests.rs
@@ -59,7 +59,7 @@ use sp_consensus_beefy::{
known_payloads,
mmr::{find_mmr_root_digest, MmrRootProvider},
test_utils::Keyring as BeefyKeyring,
- BeefyApi, Commitment, ConsensusLog, EquivocationProof, MmrRootHash, OpaqueKeyOwnershipProof,
+ BeefyApi, Commitment, ConsensusLog, DoubleVotingProof, MmrRootHash, OpaqueKeyOwnershipProof,
Payload, SignedCommitment, ValidatorSet, ValidatorSetId, VersionedFinalityProof, VoteMessage,
BEEFY_ENGINE_ID,
};
@@ -259,7 +259,7 @@ pub(crate) struct TestApi {
pub validator_set: Option,
pub mmr_root_hash: MmrRootHash,
pub reported_equivocations:
-		Option<Arc<Mutex<Vec<EquivocationProof<NumberFor<Block>, AuthorityId, Signature>>>>>,
+		Option<Arc<Mutex<Vec<DoubleVotingProof<NumberFor<Block>, AuthorityId, Signature>>>>>,
}
impl TestApi {
@@ -313,7 +313,7 @@ sp_api::mock_impl_runtime_apis! {
}
fn submit_report_equivocation_unsigned_extrinsic(
-		proof: EquivocationProof<NumberFor<Block>, AuthorityId, Signature>,
+		proof: DoubleVotingProof<NumberFor<Block>, AuthorityId, Signature>,
_dummy: OpaqueKeyOwnershipProof,
) -> Option<()> {
if let Some(equivocations_buf) = self.inner.reported_equivocations.as_ref() {
diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs
index 05575ae01c30..cfbb3d63aea4 100644
--- a/substrate/client/consensus/beefy/src/worker.rs
+++ b/substrate/client/consensus/beefy/src/worker.rs
@@ -23,6 +23,7 @@ use crate::{
},
error::Error,
find_authorities_change,
+ fisherman::Fisherman,
justification::BeefyVersionedFinalityProof,
keystore::BeefyKeystore,
metric_inc, metric_set,
@@ -39,10 +40,9 @@ use sp_api::ProvideRuntimeApi;
use sp_arithmetic::traits::{AtLeast32Bit, Saturating};
use sp_consensus::SyncOracle;
use sp_consensus_beefy::{
- check_equivocation_proof,
ecdsa_crypto::{AuthorityId, Signature},
- BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSet,
- VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID,
+ BeefyApi, Commitment, DoubleVotingProof, PayloadProvider, ValidatorSet, VersionedFinalityProof,
+ VoteMessage, BEEFY_ENGINE_ID,
};
use sp_runtime::{
generic::BlockId,
@@ -377,9 +377,10 @@ pub(crate) struct BeefyWorker {
// utilities
pub backend: Arc,
pub runtime: Arc,
-	pub key_store: BeefyKeystore<AuthorityId>,
+	pub key_store: Arc<BeefyKeystore<AuthorityId>>,
pub payload_provider: P,
pub sync: Arc
,
+ pub fisherman: Arc>,
// communication (created once, but returned and reused if worker is restarted/reinitialized)
pub comms: BeefyComms,
@@ -590,9 +591,9 @@ where
}
metric_inc!(self.metrics, beefy_good_votes_processed);
},
- VoteImportResult::Equivocation(proof) => {
+ VoteImportResult::DoubleVoting(proof) => {
metric_inc!(self.metrics, beefy_equivocation_votes);
- self.report_equivocation(proof)?;
+ self.report_double_voting(proof)?;
},
VoteImportResult::Invalid => metric_inc!(self.metrics, beefy_invalid_votes),
VoteImportResult::Stale => metric_inc!(self.metrics, beefy_stale_votes),
@@ -941,64 +942,13 @@ where
(error, self.comms)
}
- /// Report the given equivocation to the BEEFY runtime module. This method
- /// generates a session membership proof of the offender and then submits an
- /// extrinsic to report the equivocation. In particular, the session membership
- /// proof must be generated at the block at which the given set was active which
- /// isn't necessarily the best block if there are pending authority set changes.
- pub(crate) fn report_equivocation(
+ /// Report the given equivocation to the BEEFY runtime module.
+ fn report_double_voting(
&self,
-		proof: EquivocationProof<NumberFor<B>, AuthorityId, Signature>,
+		proof: DoubleVotingProof<NumberFor<B>, AuthorityId, Signature>,
) -> Result<(), Error> {
let rounds = self.persisted_state.voting_oracle.active_rounds()?;
- let (validators, validator_set_id) = (rounds.validators(), rounds.validator_set_id());
- let offender_id = proof.offender_id().clone();
-
- if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) {
- debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof);
- return Ok(())
- } else if let Some(local_id) = self.key_store.authority_id(validators) {
- if offender_id == local_id {
- warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation");
- return Ok(())
- }
- }
-
- let number = *proof.round_number();
- let hash = self
- .backend
- .blockchain()
- .expect_block_hash_from_id(&BlockId::Number(number))
- .map_err(|err| {
- let err_msg = format!(
- "Couldn't get hash for block #{:?} (error: {:?}), skipping report for equivocation",
- number, err
- );
- Error::Backend(err_msg)
- })?;
- let runtime_api = self.runtime.runtime_api();
- // generate key ownership proof at that block
- let key_owner_proof = match runtime_api
- .generate_key_ownership_proof(hash, validator_set_id, offender_id)
- .map_err(Error::RuntimeApi)?
- {
- Some(proof) => proof,
- None => {
- debug!(
- target: LOG_TARGET,
- "🥩 Equivocation offender not part of the authority set."
- );
- return Ok(())
- },
- };
-
- // submit equivocation report at **best** block
- let best_block_hash = self.backend.blockchain().info().best_hash;
- runtime_api
- .submit_report_equivocation_unsigned_extrinsic(best_block_hash, proof, key_owner_proof)
- .map_err(Error::RuntimeApi)?;
-
- Ok(())
+ self.fisherman.report_double_voting(proof, rounds)
}
}
@@ -1165,13 +1115,15 @@ pub(crate) mod tests {
.unwrap();
let payload_provider = MmrRootProvider::new(api.clone());
let comms = BeefyComms { gossip_engine, gossip_validator, on_demand_justifications };
+		let key_store: Arc<BeefyKeystore<AuthorityId>> = Arc::new(Some(keystore).into());
BeefyWorker {
- backend,
- runtime: api,
- key_store: Some(keystore).into(),
+ backend: backend.clone(),
+ runtime: api.clone(),
+ key_store: key_store.clone(),
metrics,
payload_provider,
sync: Arc::new(sync),
+ fisherman: Arc::new(Fisherman::new(backend, api, key_store)),
links,
comms,
pending_justifications: BTreeMap::new(),
@@ -1590,6 +1542,11 @@ pub(crate) mod tests {
let mut net = BeefyTestNet::new(1);
let mut worker = create_beefy_worker(net.peer(0), &keys[0], 1, validator_set.clone());
worker.runtime = api_alice.clone();
+ worker.fisherman = Arc::new(Fisherman::new(
+ worker.backend.clone(),
+ worker.runtime.clone(),
+ worker.key_store.clone(),
+ ));
// let there be a block with num = 1:
let _ = net.peer(0).push_blocks(1, false);
@@ -1604,7 +1561,7 @@ pub(crate) mod tests {
);
{
// expect voter (Alice) to successfully report it
- assert_eq!(worker.report_equivocation(good_proof.clone()), Ok(()));
+ assert_eq!(worker.report_double_voting(good_proof.clone()), Ok(()));
// verify Alice reports Bob equivocation to runtime
let reported = api_alice.reported_equivocations.as_ref().unwrap().lock();
assert_eq!(reported.len(), 1);
@@ -1616,7 +1573,7 @@ pub(crate) mod tests {
let mut bad_proof = good_proof.clone();
bad_proof.first.id = Keyring::Charlie.public();
// bad proofs are simply ignored
- assert_eq!(worker.report_equivocation(bad_proof), Ok(()));
+ assert_eq!(worker.report_double_voting(bad_proof), Ok(()));
// verify nothing reported to runtime
assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty());
@@ -1625,7 +1582,7 @@ pub(crate) mod tests {
old_proof.first.commitment.validator_set_id = 0;
old_proof.second.commitment.validator_set_id = 0;
// old proofs are simply ignored
- assert_eq!(worker.report_equivocation(old_proof), Ok(()));
+ assert_eq!(worker.report_double_voting(old_proof), Ok(()));
// verify nothing reported to runtime
assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty());
@@ -1635,7 +1592,7 @@ pub(crate) mod tests {
(block_num, payload2.clone(), set_id, &Keyring::Alice),
);
// equivocations done by 'self' are simply ignored (not reported)
- assert_eq!(worker.report_equivocation(self_proof), Ok(()));
+ assert_eq!(worker.report_double_voting(self_proof), Ok(()));
// verify nothing reported to runtime
assert!(api_alice.reported_equivocations.as_ref().unwrap().lock().is_empty());
}
diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs
index 9f837a604341..30122f6d788f 100644
--- a/substrate/frame/assets/src/impl_fungibles.rs
+++ b/substrate/frame/assets/src/impl_fungibles.rs
@@ -118,6 +118,22 @@ impl, I: 'static> fungibles::Balanced<::AccountI
{
type OnDropCredit = fungibles::DecreaseIssuance;
type OnDropDebt = fungibles::IncreaseIssuance;
+
+ fn done_deposit(
+ asset_id: Self::AssetId,
+		who: &<T as SystemConfig>::AccountId,
+ amount: Self::Balance,
+ ) {
+ Self::deposit_event(Event::Deposited { asset_id, who: who.clone(), amount })
+ }
+
+ fn done_withdraw(
+ asset_id: Self::AssetId,
+ who: &::AccountId,
+ amount: Self::Balance,
+ ) {
+ Self::deposit_event(Event::Withdrawn { asset_id, who: who.clone(), amount })
+ }
}
 impl<T: Config<I>, I: 'static> fungibles::Unbalanced<T::AccountId> for Pallet<T, I> {
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 9056b1eefbdc..d52149225558 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -571,6 +571,10 @@ pub mod pallet {
Touched { asset_id: T::AssetId, who: T::AccountId, depositor: T::AccountId },
/// Some account `who` was blocked.
Blocked { asset_id: T::AssetId, who: T::AccountId },
+ /// Some assets were deposited (e.g. for transaction fees).
+ Deposited { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
+ /// Some assets were withdrawn from the account (e.g. for transaction fees).
+ Withdrawn { asset_id: T::AssetId, who: T::AccountId, amount: T::Balance },
}
#[pallet::error]
diff --git a/substrate/frame/assets/src/tests/sets.rs b/substrate/frame/assets/src/tests/sets.rs
index f85a736c0832..4d75b8aeab2c 100644
--- a/substrate/frame/assets/src/tests/sets.rs
+++ b/substrate/frame/assets/src/tests/sets.rs
@@ -90,6 +90,12 @@ fn deposit_from_set_types_works() {
assert_eq!(First::::balance((), &account2), 50);
assert_eq!(First::::total_issuance(()), 100);
+ System::assert_has_event(RuntimeEvent::Assets(crate::Event::Deposited {
+ asset_id: asset1,
+ who: account2,
+ amount: 50,
+ }));
+
assert_eq!(imb.peek(), 50);
let (imb1, imb2) = imb.split(30);
@@ -336,6 +342,12 @@ fn withdraw_from_set_types_works() {
assert_eq!(First::::balance((), &account2), 50);
assert_eq!(First::::total_issuance(()), 200);
+ System::assert_has_event(RuntimeEvent::Assets(crate::Event::Withdrawn {
+ asset_id: asset1,
+ who: account2,
+ amount: 50,
+ }));
+
assert_eq!(imb.peek(), 50);
drop(imb);
assert_eq!(First::::total_issuance(()), 150);
diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs
index bbc6eae6af29..aecc9e721d5c 100644
--- a/substrate/frame/beefy/src/equivocation.rs
+++ b/substrate/frame/beefy/src/equivocation.rs
@@ -38,7 +38,7 @@ use codec::{self as codec, Decode, Encode};
use frame_support::traits::{Get, KeyOwnerProofSystem};
use frame_system::pallet_prelude::BlockNumberFor;
use log::{error, info};
-use sp_consensus_beefy::{EquivocationProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE};
+use sp_consensus_beefy::{DoubleVotingProof, ValidatorSetId, KEY_TYPE as BEEFY_KEY_TYPE};
use sp_runtime::{
transaction_validity::{
InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity,
@@ -123,7 +123,7 @@ pub struct EquivocationReportSystem(sp_std::marker::PhantomData<(T,
/// Equivocation evidence convenience alias.
pub type EquivocationEvidenceFor = (
- EquivocationProof<
+ DoubleVotingProof<
BlockNumberFor,
::BeefyId,
<::BeefyId as RuntimeAppPublic>::Signature,
diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs
index 09cd13ab70a4..63f3e9bb309c 100644
--- a/substrate/frame/beefy/src/lib.rs
+++ b/substrate/frame/beefy/src/lib.rs
@@ -41,7 +41,7 @@ use sp_staking::{offence::OffenceReportSystem, SessionIndex};
use sp_std::prelude::*;
use sp_consensus_beefy::{
- AuthorityIndex, BeefyAuthorityId, ConsensusLog, EquivocationProof, OnNewValidatorSet,
+ AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, OnNewValidatorSet,
ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID,
};
@@ -210,7 +210,7 @@ pub mod pallet {
pub fn report_equivocation(
origin: OriginFor,
equivocation_proof: Box<
- EquivocationProof<
+ DoubleVotingProof<
 					BlockNumberFor<T>,
 					T::BeefyId,
 					<T::BeefyId as RuntimeAppPublic>::Signature,
@@ -245,7 +245,7 @@ pub mod pallet {
pub fn report_equivocation_unsigned(
origin: OriginFor,
equivocation_proof: Box<
- EquivocationProof<
+ DoubleVotingProof<
 					BlockNumberFor<T>,
 					T::BeefyId,
 					<T::BeefyId as RuntimeAppPublic>::Signature,
@@ -368,7 +368,7 @@ impl Pallet {
/// an unsigned extrinsic with a call to `report_equivocation_unsigned` and
/// will push the transaction to the pool. Only useful in an offchain context.
pub fn submit_unsigned_equivocation_report(
- equivocation_proof: EquivocationProof<
+ equivocation_proof: DoubleVotingProof<
 			BlockNumberFor<T>,
 			T::BeefyId,
 			<T::BeefyId as RuntimeAppPublic>::Signature,
diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
index 20fdd9a243d1..bf3c00b3ff1f 100644
--- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
+++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs
@@ -14,8 +14,9 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see .
-use super::{Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeHoldReason};
-use frame_support::{derive_impl, parameter_types, traits::Contains};
+use super::{Balances, Runtime, RuntimeCall, RuntimeEvent};
+use crate::parachain::RuntimeHoldReason;
+use frame_support::{derive_impl, parameter_types};
parameter_types! {
pub Schedule: pallet_contracts::Schedule = Default::default();
@@ -28,14 +29,5 @@ impl pallet_contracts::Config for Runtime {
type Currency = Balances;
type Schedule = Schedule;
type Time = super::Timestamp;
- type CallFilter = CallFilter;
type Xcm = pallet_xcm::Pallet;
}
-
-/// In this mock, we only allow other contract calls via XCM.
-pub struct CallFilter;
-impl Contains for CallFilter {
- fn contains(call: &RuntimeCall) -> bool {
- matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. }))
- }
-}
diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs
index e7d1f6279aa3..48a94e172a02 100644
--- a/substrate/frame/contracts/mock-network/src/tests.rs
+++ b/substrate/frame/contracts/mock-network/src/tests.rs
@@ -22,10 +22,7 @@ use crate::{
relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE,
};
use codec::{Decode, Encode};
-use frame_support::{
- assert_err,
- traits::{fungibles::Mutate, Currency},
-};
+use frame_support::traits::{fungibles::Mutate, Currency};
use pallet_contracts::{test_utils::builder::*, Code};
use pallet_contracts_fixtures::compile_module;
use pallet_contracts_uapi::ReturnErrorCode;
@@ -132,26 +129,6 @@ fn test_xcm_execute_incomplete() {
});
}
-#[test]
-fn test_xcm_execute_filtered_call() {
- MockNet::reset();
-
- let contract_addr = instantiate_test_contract("xcm_execute");
-
- ParaA::execute_with(|| {
- // `remark` should be rejected, as it is not allowed by our CallFilter.
- let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] });
- let message: Xcm = Xcm::builder_unsafe()
- .transact(OriginKind::Native, Weight::MAX, call.encode())
- .build();
- let result = bare_call(contract_addr.clone())
- .data(VersionedXcm::V4(message).encode())
- .build()
- .result;
- assert_err!(result, frame_system::Error::::CallFiltered);
- });
-}
-
#[test]
fn test_xcm_execute_reentrant_call() {
MockNet::reset();
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index 0045d72141c9..3e87eb9f37ea 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -307,6 +307,9 @@ pub mod pallet {
/// Therefore please make sure to be restrictive about which dispatchables are allowed
/// in order to not introduce a new DoS vector like memory allocation patterns that can
/// be exploited to drive the runtime into a panic.
+ ///
+ /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact
+ /// calls, you must configure them separately within the XCM pallet itself.
#[pallet::no_default_bounds]
type CallFilter: Contains<::RuntimeCall>;
diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs
index 52ceda99edb7..3212aff31269 100644
--- a/substrate/frame/contracts/src/wasm/runtime.rs
+++ b/substrate/frame/contracts/src/wasm/runtime.rs
@@ -25,12 +25,8 @@ use crate::{
};
use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen};
use frame_support::{
- dispatch::DispatchInfo,
- ensure,
- pallet_prelude::{DispatchResult, DispatchResultWithPostInfo},
- parameter_types,
- traits::Get,
- weights::Weight,
+ dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types,
+ traits::Get, weights::Weight,
};
use pallet_contracts_proc_macro::define_env;
use pallet_contracts_uapi::{CallFlags, ReturnFlags};
@@ -41,7 +37,6 @@ use sp_runtime::{
};
use sp_std::{fmt, prelude::*};
use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store};
-use xcm::VersionedXcm;
type CallOf = ::RuntimeCall;
@@ -378,29 +373,6 @@ fn already_charged(_: u32) -> Option {
None
}
-/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`]
-/// instruction with a call that is not allowed by the CallFilter.
-fn ensure_executable(message: &VersionedXcm>) -> DispatchResult {
- use frame_support::traits::Contains;
- use xcm::prelude::{Transact, Xcm};
-
- let mut message: Xcm> =
- message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?;
-
- message.iter_mut().try_for_each(|inst| -> DispatchResult {
- let Transact { ref mut call, .. } = inst else { return Ok(()) };
- let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?;
-
- if !::CallFilter::contains(call) {
- return Err(frame_system::Error::::CallFiltered.into())
- }
-
- Ok(())
- })?;
-
- Ok(())
-}
-
/// Can only be used for one call.
pub struct Runtime<'a, E: Ext + 'a> {
ext: &'a mut E,
@@ -2117,7 +2089,6 @@ pub mod env {
ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?;
let message: VersionedXcm> =
ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?;
- ensure_executable::(&message)?;
let execute_weight =
<::Xcm as ExecuteController<_, _>>::WeightInfo::execute();
diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs
index b0041ccc0754..f055e8ce28e9 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs
@@ -66,6 +66,7 @@ pub fn expand_outer_dispatch(
quote! {
#( #query_call_part_macros )*
+ /// The aggregated runtime call type.
#[derive(
Clone, PartialEq, Eq,
#scrate::__private::codec::Encode,
diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
index b083abbb2a8d..1505d158895f 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs
@@ -533,6 +533,7 @@ pub(crate) fn decl_all_pallets<'a>(
for pallet_declaration in pallet_declarations {
let type_name = &pallet_declaration.name;
let pallet = &pallet_declaration.path;
+ let docs = &pallet_declaration.docs;
let mut generics = vec![quote!(#runtime)];
generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name)));
let mut attrs = Vec::new();
@@ -541,6 +542,7 @@ pub(crate) fn decl_all_pallets<'a>(
attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed"));
}
let type_decl = quote!(
+ #( #[doc = #docs] )*
#(#attrs)*
pub type #type_name = #pallet::Pallet <#(#generics),*>;
);
diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs
index 31866c787b0f..ded77bed4c8e 100644
--- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs
+++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs
@@ -605,6 +605,8 @@ pub struct Pallet {
pub pallet_parts: Vec,
/// Expressions specified inside of a #[cfg] attribute.
pub cfg_pattern: Vec,
+	/// The doc literals attached to the pallet declaration.
+ pub docs: Vec,
}
impl Pallet {
@@ -774,6 +776,7 @@ fn convert_pallets(pallets: Vec) -> syn::Result>>()?;
diff --git a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs
index 99364aaa96cd..1975f059152c 100644
--- a/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs
+++ b/substrate/frame/support/procedural/src/pallet/expand/tt_default_parts.rs
@@ -198,9 +198,9 @@ pub fn expand_tt_default_parts(def: &mut Def) -> proc_macro2::TokenStream {
macro_rules! #default_parts_unique_id_v2 {
{
$caller:tt
- frame_support = [{ $($frame_support:ident)::* }]
+ your_tt_return = [{ $my_tt_return:path }]
} => {
- $($frame_support)*::__private::tt_return! {
+ $my_tt_return! {
$caller
tokens = [{
+ Pallet #call_part_v2 #storage_part_v2 #event_part_v2 #error_part_v2 #origin_part_v2 #config_part_v2
diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs
index 011f69f37147..43f11896808c 100644
--- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs
+++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs
@@ -93,7 +93,7 @@ fn construct_runtime_implicit_to_explicit(
let frame_support = generate_access_from_frame_or_crate("frame-support")?;
let attr = if legacy_ordering { quote!((legacy_ordering)) } else { quote!() };
let mut expansion = quote::quote!(
- #[frame_support::runtime #attr]
+ #[#frame_support::runtime #attr]
#input
);
for pallet in definition.pallet_decls.iter() {
@@ -103,7 +103,7 @@ fn construct_runtime_implicit_to_explicit(
expansion = quote::quote!(
#frame_support::__private::tt_call! {
macro = [{ #pallet_path::tt_default_parts_v2 }]
- frame_support = [{ #frame_support }]
+ your_tt_return = [{ #frame_support::__private::tt_return }]
~~> #frame_support::match_and_insert! {
target = [{ #expansion }]
pattern = [{ #pallet_name = #pallet_path #pallet_instance }]
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
index d2f1857fb2b4..09f5290541d3 100644
--- a/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet.rs
@@ -16,6 +16,7 @@
// limitations under the License.
use crate::construct_runtime::parse::{Pallet, PalletPart, PalletPartKeyword, PalletPath};
+use frame_support_procedural_tools::get_doc_literals;
use quote::ToTokens;
use syn::{punctuated::Punctuated, spanned::Spanned, token, Error, Ident, PathArguments};
@@ -86,6 +87,8 @@ impl Pallet {
let cfg_pattern = vec![];
+ let docs = get_doc_literals(&item.attrs);
+
Ok(Pallet {
is_expanded: true,
name,
@@ -94,6 +97,7 @@ impl Pallet {
instance,
cfg_pattern,
pallet_parts,
+ docs,
})
}
}
diff --git a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
index 437a163cfbc4..e167d37d5f14 100644
--- a/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
+++ b/substrate/frame/support/procedural/src/runtime/parse/pallet_decl.rs
@@ -21,13 +21,14 @@ use syn::{spanned::Spanned, Attribute, Ident, PathArguments};
/// The declaration of a pallet.
#[derive(Debug, Clone)]
pub struct PalletDeclaration {
- /// The name of the pallet, e.g.`System` in `System: frame_system`.
+	/// The name of the pallet, e.g. `System` in `pub type System = frame_system`.
pub name: Ident,
/// Optional attributes tagged right above a pallet declaration.
pub attrs: Vec,
- /// The path of the pallet, e.g. `frame_system` in `System: frame_system`.
+ /// The path of the pallet, e.g. `frame_system` in `pub type System = frame_system`.
pub path: syn::Path,
- /// The instance of the pallet, e.g. `Instance1` in `Council: pallet_collective::`.
+ /// The instance of the pallet, e.g. `Instance1` in `pub type Council =
+ /// pallet_collective`.
pub instance: Option,
}
diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
index 62faed269d37..aa2f26f3a6a8 100644
--- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs
@@ -201,6 +201,8 @@ fn transaction_payment_in_asset_possible() {
.base_weight(Weight::from_parts(base_weight, 0))
.build()
.execute_with(|| {
+ System::set_block_number(1);
+
// create the asset
let asset_id = 1;
let min_balance = 2;
@@ -246,6 +248,12 @@ fn transaction_payment_in_asset_possible() {
// check that fee was charged in the given asset
assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset);
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
+ asset_id,
+ who: caller,
+ amount: fee_in_asset,
+ }));
+
assert_ok!(ChargeAssetTxPayment::::post_dispatch(
Some(pre),
&info_from_weight(WEIGHT_5), // estimated tx weight
@@ -385,6 +393,8 @@ fn asset_transaction_payment_with_tip_and_refund() {
.base_weight(Weight::from_parts(base_weight, 0))
.build()
.execute_with(|| {
+ System::set_block_number(1);
+
// create the asset
let asset_id = 1;
let min_balance = 2;
@@ -434,6 +444,12 @@ fn asset_transaction_payment_with_tip_and_refund() {
)
.unwrap();
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
+ asset_id,
+ who: caller,
+ amount: fee_in_asset,
+ }));
+
assert_ok!(ChargeAssetTxPayment::::post_dispatch(
Some(pre),
&info_from_weight(WEIGHT_100),
@@ -451,6 +467,12 @@ fn asset_transaction_payment_with_tip_and_refund() {
balance - fee_in_asset + expected_token_refund
);
assert_eq!(Balances::free_balance(caller), 20 * balance_factor);
+
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited {
+ asset_id,
+ who: caller,
+ amount: expected_token_refund,
+ }));
});
}
diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
index 8df98ceda997..098ecf11dd92 100644
--- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
+++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs
@@ -157,6 +157,8 @@ fn transaction_payment_in_asset_possible() {
.base_weight(Weight::from_parts(base_weight, 0))
.build()
.execute_with(|| {
+ System::set_block_number(1);
+
// create the asset
let asset_id = 1;
let min_balance = 2;
@@ -188,6 +190,12 @@ fn transaction_payment_in_asset_possible() {
assert_eq!(Assets::balance(asset_id, caller), balance - fee);
assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), 0);
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
+ asset_id,
+ who: caller,
+ amount: fee,
+ }));
+
assert_ok!(ChargeAssetTxPayment::::post_dispatch(
Some(pre),
&info_from_weight(Weight::from_parts(weight, 0)),
@@ -198,6 +206,12 @@ fn transaction_payment_in_asset_possible() {
assert_eq!(Assets::balance(asset_id, caller), balance - fee);
// check that the block author gets rewarded
assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), fee);
+
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited {
+ asset_id,
+ who: BLOCK_AUTHOR,
+ amount: fee,
+ }));
});
}
@@ -263,6 +277,8 @@ fn asset_transaction_payment_with_tip_and_refund() {
.base_weight(Weight::from_parts(base_weight, 0))
.build()
.execute_with(|| {
+ System::set_block_number(1);
+
// create the asset
let asset_id = 1;
let min_balance = 2;
@@ -292,6 +308,12 @@ fn asset_transaction_payment_with_tip_and_refund() {
.unwrap();
assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip);
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn {
+ asset_id,
+ who: caller,
+ amount: fee_with_tip,
+ }));
+
let final_weight = 50;
assert_ok!(ChargeAssetTxPayment::::post_dispatch(
Some(pre),
@@ -304,6 +326,12 @@ fn asset_transaction_payment_with_tip_and_refund() {
fee_with_tip - (weight - final_weight) * min_balance / ExistentialDeposit::get();
assert_eq!(Assets::balance(asset_id, caller), balance - (final_fee));
assert_eq!(Assets::balance(asset_id, BLOCK_AUTHOR), final_fee);
+
+ System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Deposited {
+ asset_id,
+ who: caller,
+ amount: fee_with_tip - final_fee,
+ }));
});
}
diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index 6f644c5f790d..390c0ff71273 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -306,14 +306,14 @@ pub struct VoteMessage {
/// BEEFY happens when a voter votes on the same round/block for different payloads.
/// Proving is achieved by collecting the signed commitments of conflicting votes.
#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
-pub struct EquivocationProof {
+pub struct DoubleVotingProof {
/// The first vote in the equivocation.
pub first: VoteMessage,
/// The second vote in the equivocation.
pub second: VoteMessage,
}
-impl EquivocationProof {
+impl DoubleVotingProof {
/// Returns the authority id of the equivocator.
pub fn offender_id(&self) -> &Id {
&self.first.id
@@ -347,7 +347,7 @@ where
/// Verifies the equivocation proof by making sure that both votes target
/// different blocks and that its signatures are valid.
pub fn check_equivocation_proof(
- report: &EquivocationProof::Signature>,
+ report: &DoubleVotingProof::Signature>,
) -> bool
where
Id: BeefyAuthorityId + PartialEq,
@@ -437,7 +437,7 @@ sp_api::decl_runtime_apis! {
/// hardcoded to return `None`). Only useful in an offchain context.
fn submit_report_equivocation_unsigned_extrinsic(
equivocation_proof:
- EquivocationProof, AuthorityId, ::Signature>,
+ DoubleVotingProof, AuthorityId, ::Signature>,
key_owner_proof: OpaqueKeyOwnershipProof,
) -> Option<()>;
diff --git a/substrate/primitives/consensus/beefy/src/test_utils.rs b/substrate/primitives/consensus/beefy/src/test_utils.rs
index ec13c9c69004..d7fd49214f12 100644
--- a/substrate/primitives/consensus/beefy/src/test_utils.rs
+++ b/substrate/primitives/consensus/beefy/src/test_utils.rs
@@ -18,7 +18,7 @@
#[cfg(feature = "bls-experimental")]
use crate::ecdsa_bls_crypto;
use crate::{
- ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, EquivocationProof, Payload,
+ ecdsa_crypto, AuthorityIdBound, BeefySignatureHasher, Commitment, DoubleVotingProof, Payload,
ValidatorSetId, VoteMessage,
};
use sp_application_crypto::{AppCrypto, AppPair, RuntimeAppPublic, Wraps};
@@ -140,7 +140,7 @@ impl From> for ecdsa_crypto::Public {
pub fn generate_equivocation_proof(
vote1: (u64, Payload, ValidatorSetId, &Keyring),
vote2: (u64, Payload, ValidatorSetId, &Keyring),
-) -> EquivocationProof {
+) -> DoubleVotingProof {
let signed_vote = |block_number: u64,
payload: Payload,
validator_set_id: ValidatorSetId,
@@ -151,5 +151,5 @@ pub fn generate_equivocation_proof(
};
let first = signed_vote(vote1.0, vote1.1, vote1.2, vote1.3);
let second = signed_vote(vote2.0, vote2.1, vote2.2, vote2.3);
- EquivocationProof { first, second }
+ DoubleVotingProof { first, second }
}
diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml
new file mode 100644
index 000000000000..6cd28c5a4936
--- /dev/null
+++ b/templates/minimal/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "minimal-template"
+description = "A minimal template built with Substrate, part of the Polkadot SDK."
+version = "0.0.0"
+license = "MIT-0"
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+edition.workspace = true
+publish = false
+
+[lints]
+workspace = true
+
+[dependencies]
+minimal-template-node = { path = "./node" }
+minimal-template-runtime = { path = "./runtime" }
+pallet-minimal-template = { path = "./pallets/template" }
+polkadot-sdk-docs = { path = "../../docs/sdk" }
+
+frame = { package = "polkadot-sdk-frame", path = "../../substrate/frame" }
+
+# How we build docs in rust-docs
+simple-mermaid = "0.1.1"
+docify = "0.2.7"
diff --git a/templates/minimal/README.md b/templates/minimal/README.md
index e69de29bb2d1..0541e393db93 100644
--- a/templates/minimal/README.md
+++ b/templates/minimal/README.md
@@ -0,0 +1,13 @@
+# Minimal Template
+
+This is a minimal template for creating a blockchain using the Polkadot SDK.
+
+# Docs
+
+You can generate and view the [Rust
+Docs](https://doc.rust-lang.org/cargo/commands/cargo-doc.html) for this template
+with this command:
+
+```sh
+cargo doc -p minimal-template --open
+```
diff --git a/templates/minimal/runtime/src/lib.rs b/templates/minimal/runtime/src/lib.rs
index 794f30a054a8..d2debbf5689f 100644
--- a/templates/minimal/runtime/src/lib.rs
+++ b/templates/minimal/runtime/src/lib.rs
@@ -15,6 +15,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//! A minimal runtime that includes the template [`pallet`](`pallet_minimal_template`).
+
#![cfg_attr(not(feature = "std"), no_std)]
// Make the WASM binary available.
@@ -24,6 +26,7 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
use frame::{
deps::frame_support::{
genesis_builder_helper::{build_state, get_preset},
+ runtime,
weights::{FixedFee, NoFee},
},
prelude::*,
@@ -36,6 +39,7 @@ use frame::{
},
};
+/// The runtime version.
#[runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("minimal-template-runtime"),
@@ -54,61 +58,108 @@ pub fn native_version() -> NativeVersion {
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}
+/// The signed extensions that are added to the runtime.
type SignedExtra = (
+ // Checks that the sender is not the zero address.
frame_system::CheckNonZeroSender,
+ // Checks that the runtime version is correct.
frame_system::CheckSpecVersion,
+ // Checks that the transaction version is correct.
frame_system::CheckTxVersion,
+ // Checks that the genesis hash is correct.
frame_system::CheckGenesis,
+ // Checks that the era is valid.
frame_system::CheckEra,
+ // Checks that the nonce is valid.
frame_system::CheckNonce,
+ // Checks that the weight is valid.
frame_system::CheckWeight,
+ // Ensures that the sender has enough funds to pay for the transaction
+ // and deducts the fee from the sender's account.
pallet_transaction_payment::ChargeTransactionPayment,
);
-construct_runtime!(
- pub enum Runtime {
- System: frame_system,
- Timestamp: pallet_timestamp,
-
- Balances: pallet_balances,
- Sudo: pallet_sudo,
- TransactionPayment: pallet_transaction_payment,
-
- // our local pallet
- Template: pallet_minimal_template,
- }
-);
+// Composes the runtime by adding all the used pallets and deriving necessary types.
+#[runtime]
+mod runtime {
+ /// The main runtime type.
+ #[runtime::runtime]
+ #[runtime::derive(
+ RuntimeCall,
+ RuntimeEvent,
+ RuntimeError,
+ RuntimeOrigin,
+ RuntimeFreezeReason,
+ RuntimeHoldReason,
+ RuntimeSlashReason,
+ RuntimeLockId,
+ RuntimeTask
+ )]
+ pub struct Runtime;
+
+ /// Mandatory system pallet that should always be included in a FRAME runtime.
+ #[runtime::pallet_index(0)]
+ pub type System = frame_system;
+
+ /// Provides a way for consensus systems to set and check the onchain time.
+ #[runtime::pallet_index(1)]
+ pub type Timestamp = pallet_timestamp;
+
+ /// Provides the ability to keep track of balances.
+ #[runtime::pallet_index(2)]
+ pub type Balances = pallet_balances;
+
+ /// Provides a way to execute privileged functions.
+ #[runtime::pallet_index(3)]
+ pub type Sudo = pallet_sudo;
+
+ /// Provides the ability to charge for extrinsic execution.
+ #[runtime::pallet_index(4)]
+ pub type TransactionPayment = pallet_transaction_payment;
+
+ /// A minimal pallet template.
+ #[runtime::pallet_index(5)]
+ pub type Template = pallet_minimal_template;
+}
parameter_types! {
pub const Version: RuntimeVersion = VERSION;
}
+/// Implements the types required for the system pallet.
#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)]
impl frame_system::Config for Runtime {
type Block = Block;
type Version = Version;
- type BlockHashCount = ConstU32<1024>;
+ // Use the account data from the balances pallet
type AccountData = pallet_balances::AccountData<::Balance>;
}
+// Implements the types required for the balances pallet.
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
impl pallet_balances::Config for Runtime {
type AccountStore = System;
}
+// Implements the types required for the sudo pallet.
#[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)]
impl pallet_sudo::Config for Runtime {}
+// Implements the types required for the timestamp pallet.
#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)]
impl pallet_timestamp::Config for Runtime {}
+// Implements the types required for the transaction payment pallet.
#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)]
impl pallet_transaction_payment::Config for Runtime {
type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter;
+ // Setting fee as independent of the weight of the extrinsic for demo purposes
type WeightToFee = NoFee<::Balance>;
+ // Setting fee as fixed for any length of the call data for demo purposes
type LengthToFee = FixedFee<1, ::Balance>;
}
+// Implements the types required for the template pallet.
impl pallet_minimal_template::Config for Runtime {}
type Block = frame::runtime::types_common::BlockOf;
diff --git a/templates/minimal/src/lib.rs b/templates/minimal/src/lib.rs
new file mode 100644
index 000000000000..68825d190bb2
--- /dev/null
+++ b/templates/minimal/src/lib.rs
@@ -0,0 +1,75 @@
+//! # Minimal Template
+//!
+//! This is a minimal template for creating a blockchain using the Polkadot SDK.
+//!
+//! ## Components
+//!
+//! The template consists of the following components:
+//!
+//! ### Node
+//!
+//! A minimal blockchain [`node`](`minimal_template_node`) that is capable of running a
+//! runtime. It uses a simple chain specification, provides an option to choose ManualSeal or
+//! InstantSeal for consensus, and exposes a few commands to interact with the node.
+//!
+//! ### Runtime
+//!
+//! A minimal [`runtime`](`minimal_template_runtime`) (or a state transition function) that
+//! is capable of being run on the node. It is built using the [`FRAME`](`frame`) framework
+//! that enables the composition of the core logic via separate modules called "pallets".
+//! FRAME defines a complete DSL for building such pallets and the runtime itself.
+//!
+//! #### Transaction Fees
+//!
+//! The runtime charges a transaction fee for every transaction that is executed. The fee is
+//! calculated based on the weight of the transaction (accounting for the execution time) and
+//! length of the call data. Please refer to
+//! [`benchmarking docs`](`polkadot_sdk_docs::reference_docs::frame_benchmarking_weight`) for
+//! more information on how the weight is calculated.
+//!
+//! This template sets the fee as independent of the weight of the extrinsic and fixed for any
+//! length of the call data for demo purposes.
+//!
+//! ### Pallet
+//!
+//! A minimal [`pallet`](`pallet_minimal_template`) that is built using FRAME. It is a unit of
+//! encapsulated logic that has a clearly defined responsibility and can be linked to other pallets.
+//!
+//! ## Getting Started
+//!
+//! To get started with the template, follow the steps below:
+//!
+//! ### Build the Node
+//!
+//! Build the node using the following command:
+//!
+//! ```bash
+//! cargo build -p minimal-template-node --release
+//! ```
+//!
+//! ### Run the Node
+//!
+//! Run the node using the following command:
+//!
+//! ```bash
+//! ./target/release/minimal-template-node --dev
+//! ```
+//!
+//! ### CLI Options
+//!
+//! The node exposes a few options that can be used to interact with the node. To see the list of
+//! available options, run the following command:
+//!
+//! ```bash
+//! ./target/release/minimal-template-node --help
+//! ```
+//!
+//! #### Consensus Algorithm
+//!
+//! In order to run the node with a specific consensus algorithm, use the `--consensus` flag. For
+//! example, to run the node with ManualSeal consensus with a block time of 5000ms, use the
+//! following command:
+//!
+//! ```bash
+//! ./target/release/minimal-template-node --dev --consensus manual-seal-5000
+//! ```