From ca7c01c8d85f10cf330008686a03ce362202ae8e Mon Sep 17 00:00:00 2001
From: gui
Date: Thu, 18 Apr 2024 00:22:46 +0900
Subject: [PATCH 01/74] Improve doc for pallet macro and config macro (#4146)

Improve doc:
* the pallet macro actually refers to 2 places: the module and the struct
placeholder, but the doc doesn't really clarify this (I should have named the
latter just `pallet_struct` or something, but it is a bit late)
* The doc of `with_default` is a bit confusing too IMO.

CC @kianenigma

---------

Co-authored-by: Liam Aharon
---
 substrate/frame/support/src/lib.rs | 141 +++++++++++++++++++++--------
 1 file changed, 103 insertions(+), 38 deletions(-)

diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs
index 895215d364e3..984a7f7537fe 100644
--- a/substrate/frame/support/src/lib.rs
+++ b/substrate/frame/support/src/lib.rs
@@ -936,6 +936,83 @@ pub mod pallet_prelude {
 	pub use sp_weights::Weight;
 }

/// The pallet macro has 2 purposes:
///
/// * [For declaring a pallet as a rust module](#1---pallet-module-declaration)
/// * [For declaring the `struct` placeholder of a
///   pallet](#2---pallet-struct-placeholder-declaration)
///
/// # 1 - Pallet module declaration
///
/// The module to declare a pallet is organized as follows:
/// ```
/// #[frame_support::pallet] // <- the macro
/// mod pallet {
/// 	#[pallet::pallet]
/// 	pub struct Pallet<T>(_);
///
/// 	#[pallet::config]
/// 	pub trait Config: frame_system::Config {}
///
/// 	#[pallet::call]
/// 	impl<T: Config> Pallet<T> {
/// 	}
///
/// 	/* ... */
/// }
/// ```
///
/// The documentation for each individual part can be found at [frame_support::pallet_macros]
///
/// ## Dev Mode (`#[pallet(dev_mode)]`)
///
/// Syntax:
///
/// ```
/// #[frame_support::pallet(dev_mode)]
/// mod pallet {
/// #	#[pallet::pallet]
/// #	pub struct Pallet<T>(_);
/// #	#[pallet::config]
/// #	pub trait Config: frame_system::Config {}
/// 	/* ... */
/// }
/// ```
///
/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The
/// aim of dev mode is to loosen some of the restrictions and requirements placed on
/// production pallets for easy tinkering and development. Dev mode pallets should not be
/// used in production. Enabling dev mode has the following effects:
///
/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By
///   default, dev mode pallets will assume a weight of zero (`0`) if a weight is not
///   specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not
///   specify a weight.
/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By
///   default, dev mode pallets will assume a call index based on the order of the call.
/// * All storages are marked as unbounded, meaning you do not need to implement
///   [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is
///   equivalent to specifying `#[pallet::unbounded]` on all storage type definitions.
/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode,
///   these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher`
///   can simply be ignored when in `dev_mode`.
///
/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or
/// `#[frame_support::pallet]` attribute macro that encloses your pallet module.
This
/// argument cannot be specified anywhere else, including but not limited to the
/// `#[pallet::pallet]` attribute macro.
///
+/// WARNING:
+/// You should never deploy or use dev mode pallets in production. Doing so can break your
+/// chain. Once you are done tinkering, you should
+/// remove the 'dev_mode' argument from your #[pallet] declaration and fix any compile
+/// errors before attempting to use your pallet in a production scenario.
+/// 
+/// +/// # 2 - Pallet struct placeholder declaration +/// /// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to /// specify pallet information. /// @@ -984,40 +1061,6 @@ pub mod pallet_prelude { /// [`StorageInfoTrait`](frame_support::traits::StorageInfoTrait) for the pallet using the /// [`PartialStorageInfoTrait`](frame_support::traits::PartialStorageInfoTrait) /// implementation of storages. -/// -/// ## Dev Mode (`#[pallet(dev_mode)]`) -/// -/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. The -/// aim of dev mode is to loosen some of the restrictions and requirements placed on -/// production pallets for easy tinkering and development. Dev mode pallets should not be -/// used in production. Enabling dev mode has the following effects: -/// -/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not -/// specified. This is equivalent to specifying `#[weight(0)]` on all calls that do not -/// specify a weight. -/// * Call indices no longer need to be specified on every `#[pallet::call]` declaration. By -/// default, dev mode pallets will assume a call index based on the order of the call. -/// * All storages are marked as unbounded, meaning you do not need to implement -/// [`MaxEncodedLen`](frame_support::pallet_prelude::MaxEncodedLen) on storage types. This is -/// equivalent to specifying `#[pallet::unbounded]` on all storage type definitions. -/// * Storage hashers no longer need to be specified and can be replaced by `_`. In dev mode, -/// these will be replaced by `Blake2_128Concat`. In case of explicit key-binding, `Hasher` -/// can simply be ignored when in `dev_mode`. -/// -/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or -/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This -/// argument cannot be specified anywhere else, including but not limited to the -/// `#[pallet::pallet]` attribute macro. -/// -///
-/// WARNING:
-/// You should not deploy or use dev mode pallets in production. Doing so can break your
-/// chain and therefore should never be done. Once you are done tinkering, you should
-/// remove the 'dev_mode' argument from your #[pallet] declaration and fix any compile
-/// errors before attempting to use your pallet in a production scenario.
-/// 
pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the `pallet::` macros @@ -1456,16 +1499,24 @@ pub mod pallet_macros { /// # use core::fmt::Debug; /// # use frame_support::traits::Contains; /// # + /// # pub trait SomeMoreComplexBound {} + /// # /// #[pallet::pallet] /// pub struct Pallet(_); /// /// #[pallet::config(with_default)] // <- with_default is optional /// pub trait Config: frame_system::Config { /// /// The overarching event type. - /// #[pallet::no_default_bounds] // Default is not supported for RuntimeEvent + /// #[pallet::no_default_bounds] // Default with bounds is not supported for RuntimeEvent /// type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// - /// // ...other config items get default + /// /// A more complex type. + /// #[pallet::no_default] // Example of type where no default should be provided + /// type MoreComplexType: SomeMoreComplexBound; + /// + /// /// A simple type. + /// // Default with bounds is supported for simple types + /// type SimpleType: From; /// } /// /// #[pallet::event] @@ -1475,12 +1526,23 @@ pub mod pallet_macros { /// } /// ``` /// - /// As shown above, you may also attach the [`#[pallet::no_default]`](`no_default`) + /// As shown above: + /// * you may attach the [`#[pallet::no_default]`](`no_default`) /// attribute to specify that a particular trait item _cannot_ be used as a default when a /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) /// attribute macro. This will cause that particular trait item to simply not appear in /// default testing configs based on this config (the trait item will not be included in /// `DefaultConfig`). + /// * you may attach the [`#[pallet::no_default_bounds]`](`no_default_bounds`) + /// attribute to specify that a particular trait item can be used as a default when a + /// test `Config` is derived using the [`#[derive_impl(..)]`](`frame_support::derive_impl`) + /// attribute macro. But its bounds cannot be enforced at this point and should be + /// discarded when generating the default config trait. + /// * you may not specify any attribute to generate a trait item in the default config + /// trait. + /// + /// In case origin of error is not clear it is recommended to disable all default with + /// [`#[pallet::no_default]`](`no_default`) and enable them one by one. /// /// ### `DefaultConfig` Caveats /// @@ -1500,7 +1562,10 @@ pub mod pallet_macros { /// the `DefaultConfig` trait, and therefore any impl of `DefaultConfig` doesn't need to /// implement such items. /// - /// For more information, see [`frame_support::derive_impl`]. + /// For more information, see: + /// * [`frame_support::derive_impl`]. + /// * [`#[pallet::no_default]`](`no_default`) + /// * [`#[pallet::no_default_bounds]`](`no_default_bounds`) pub use frame_support_procedural::config; /// Allows defining an enum that gets composed as an aggregate enum by `construct_runtime`. From bfbf7f5d6f5c491a820bb0a4fb9508ce52192a06 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 17 Apr 2024 18:29:29 +0300 Subject: [PATCH 02/74] chainHead: Report unique hashes for pruned blocks (#3667) This PR ensures that the reported pruned blocks are unique. While at it, ensure that the best block event is properly generated when the last best block is a fork that will be pruned in the future. 
To achieve this, the chainHead keeps an LRU set of reported pruned blocks to ensure
the following are not reported twice:

```bash
finalized -> block 1 -> block 2   -> block 3
                     -> block 2   -> block 4 -> block 5
          -> block 1 -> block 2_f -> block 6 -> block 7 -> block 8
```

When block 7 is finalized, the branch [block 2; block 3] is reported as pruned. When
block 8 is finalized, the branch [block 2; block 4; block 5] should be reported as
pruned; however, block 2 was already reported as pruned in the previous step.

This is a side effect of the pruned blocks being reported at level N - 1. For example,
if all pruned forks were reported on first encounter (when block 6 is finalized, we
already know that block 3 and block 5 are stale), we would not need the LRU cache.

cc @paritytech/subxt-team

Closes https://github.com/paritytech/polkadot-sdk/issues/3658

---------

Signed-off-by: Alexandru Vasile
Co-authored-by: Sebastian Kunert
---
 Cargo.lock                                    |   1 +
 substrate/client/rpc-spec-v2/Cargo.toml       |   1 +
 .../rpc-spec-v2/src/chain_head/chain_head.rs  |   2 +-
 .../src/chain_head/chain_head_follow.rs       | 108 ++++-----
 .../rpc-spec-v2/src/chain_head/tests.rs       | 213 ++++++++++++++++++
 5 files changed, 273 insertions(+), 52 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index aeb8b2cdb190..7bf5215b6dec 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -17465,6 +17465,7 @@ dependencies = [
 "sc-transaction-pool",
 "sc-transaction-pool-api",
 "sc-utils",
+ "schnellru",
 "serde",
 "serde_json",
 "sp-api",
diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml
index e2612d914542..f2fc7bee6e20 100644
--- a/substrate/client/rpc-spec-v2/Cargo.toml
+++ b/substrate/client/rpc-spec-v2/Cargo.toml
@@ -42,6 +42,7 @@ array-bytes = "6.1"
 log = { workspace = true, default-features = true }
 futures-util = { version = "0.3.30", default-features = false }
 rand = "0.8.5"
+schnellru = "0.2.1"

 [dev-dependencies]
 jsonrpsee = { version = "0.22", features = ["server", "ws-client"] }
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
index 86d9a726d7be..6779180a4146 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs
@@ -75,7 +75,7 @@ pub struct ChainHeadConfig {
 /// Maximum pinned blocks across all connections.
 /// This number is large enough to consider immediate blocks.
 /// Note: This should never exceed the `PINNING_CACHE_SIZE` from client/db.
-const MAX_PINNED_BLOCKS: usize = 512;
+pub(crate) const MAX_PINNED_BLOCKS: usize = 512;

 /// Any block of any subscription should not be pinned more than
 /// this constant. When a subscription contains a block older than this,
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
index 0d87a45c07e2..a753896b24c2 100644
--- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
+++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_follow.rs
@@ -19,7 +19,7 @@
 //! Implementation of the `chainHead_follow` method.
use crate::chain_head::{ - chain_head::LOG_TARGET, + chain_head::{LOG_TARGET, MAX_PINNED_BLOCKS}, event::{ BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, RuntimeEvent, RuntimeVersionEvent, @@ -37,6 +37,7 @@ use sc_client_api::{ Backend, BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, }; use sc_rpc::utils::to_sub_message; +use schnellru::{ByLength, LruMap}; use sp_api::CallApiAt; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, Info, @@ -68,7 +69,9 @@ pub struct ChainHeadFollower, Block: BlockT, Client> { /// Subscription ID. sub_id: String, /// The best reported block by this subscription. - best_block_cache: Option, + current_best_block: Option, + /// LRU cache of pruned blocks. + pruned_blocks: LruMap, /// Stop all subscriptions if the distance between the leaves and the current finalized /// block is larger than this value. max_lagging_distance: usize, @@ -90,7 +93,10 @@ impl, Block: BlockT, Client> ChainHeadFollower, - ) -> Result<(Vec>, HashSet), SubscriptionManagementError> - { + ) -> Result>, SubscriptionManagementError> { let init = self.get_init_blocks_with_forks(startup_point.finalized_hash)?; // The initialized event is the first one sent. let initial_blocks = init.finalized_block_descendants; let finalized_block_hashes = init.finalized_block_hashes; + // These are the pruned blocks that we should not report again. + for pruned in init.pruned_forks { + self.pruned_blocks.insert(pruned, ()); + } let finalized_block_hash = startup_point.finalized_hash; let finalized_block_runtime = self.generate_runtime_event(finalized_block_hash, None); @@ -345,11 +353,11 @@ where let best_block_hash = startup_point.best_hash; if best_block_hash != finalized_block_hash { let best_block = FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash }); - self.best_block_cache = Some(best_block_hash); + self.current_best_block = Some(best_block_hash); finalized_block_descendants.push(best_block); }; - Ok((finalized_block_descendants, init.pruned_forks)) + Ok(finalized_block_descendants) } /// Generate the "NewBlock" event and potentially the "BestBlockChanged" event for the @@ -377,19 +385,19 @@ where let best_block_event = FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash: block_hash }); - match self.best_block_cache { + match self.current_best_block { Some(block_cache) => { // The RPC layer has not reported this block as best before. // Note: This handles the race with the finalized branch. if block_cache != block_hash { - self.best_block_cache = Some(block_hash); + self.current_best_block = Some(block_hash); vec![new_block, best_block_event] } else { vec![new_block] } }, None => { - self.best_block_cache = Some(block_hash); + self.current_best_block = Some(block_hash); vec![new_block, best_block_event] }, } @@ -458,7 +466,7 @@ where // When the node falls out of sync and then syncs up to the tip of the chain, it can // happen that we skip notifications. Then it is better to terminate the connection // instead of trying to send notifications for all missed blocks. - if let Some(best_block_hash) = self.best_block_cache { + if let Some(best_block_hash) = self.current_best_block { let ancestor = sp_blockchain::lowest_common_ancestor( &*self.client, *hash, @@ -481,13 +489,10 @@ where } /// Get all pruned block hashes from the provided stale heads. - /// - /// The result does not include hashes from `to_ignore`. 
fn get_pruned_hashes( - &self, + &mut self, stale_heads: &[Block::Hash], last_finalized: Block::Hash, - to_ignore: &mut HashSet, ) -> Result, SubscriptionManagementError> { let blockchain = self.backend.blockchain(); let mut pruned = Vec::new(); @@ -497,11 +502,13 @@ where // Collect only blocks that are not part of the canonical chain. pruned.extend(tree_route.enacted().iter().filter_map(|block| { - if !to_ignore.remove(&block.hash) { - Some(block.hash) - } else { - None + if self.pruned_blocks.get(&block.hash).is_some() { + // The block was already reported as pruned. + return None } + + self.pruned_blocks.insert(block.hash, ()); + Some(block.hash) })) } @@ -515,7 +522,6 @@ where fn handle_finalized_blocks( &mut self, notification: FinalityNotification, - to_ignore: &mut HashSet, startup_point: &StartupPoint, ) -> Result>, SubscriptionManagementError> { let last_finalized = notification.hash; @@ -536,25 +542,32 @@ where // Report all pruned blocks from the notification that are not // part of the fork we need to ignore. let pruned_block_hashes = - self.get_pruned_hashes(¬ification.stale_heads, last_finalized, to_ignore)?; + self.get_pruned_hashes(¬ification.stale_heads, last_finalized)?; let finalized_event = FollowEvent::Finalized(Finalized { finalized_block_hashes, pruned_block_hashes: pruned_block_hashes.clone(), }); - match self.best_block_cache { - Some(block_cache) => { - // If the best block wasn't pruned, we are done here. - if !pruned_block_hashes.iter().any(|hash| *hash == block_cache) { - events.push(finalized_event); - return Ok(events) - } - - // The best block is reported as pruned. Therefore, we need to signal a new - // best block event before submitting the finalized event. + if let Some(current_best_block) = self.current_best_block { + // The best reported block is in the pruned list. Report a new best block. + let is_in_pruned_list = + pruned_block_hashes.iter().any(|hash| *hash == current_best_block); + // The block is not the last finalized block. + // + // It can be either: + // - a descendant of the last finalized block + // - a block on a fork that will be pruned in the future. + // + // In those cases, we emit a new best block. + let is_not_last_finalized = current_best_block != last_finalized; + + if is_in_pruned_list || is_not_last_finalized { + // We need to generate a best block event. let best_block_hash = self.client.info().best_hash; - if best_block_hash == block_cache { + + // Defensive check against state missmatch. + if best_block_hash == current_best_block { // The client doest not have any new information about the best block. // The information from `.info()` is updated from the DB as the last // step of the finalization and it should be up to date. @@ -564,23 +577,18 @@ where "[follow][id={:?}] Client does not contain different best block", self.sub_id, ); - events.push(finalized_event); - Ok(events) } else { // The RPC needs to also submit a new best block changed before the // finalized event. 
- self.best_block_cache = Some(best_block_hash); - let best_block_event = - FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash }); - events.extend([best_block_event, finalized_event]); - Ok(events) + self.current_best_block = Some(best_block_hash); + events + .push(FollowEvent::BestBlockChanged(BestBlockChanged { best_block_hash })); } - }, - None => { - events.push(finalized_event); - Ok(events) - }, + } } + + events.push(finalized_event); + Ok(events) } /// Submit the events from the provided stream to the RPC client @@ -589,7 +597,6 @@ where &mut self, startup_point: &StartupPoint, mut stream: EventStream, - mut to_ignore: HashSet, sink: SubscriptionSink, rx_stop: oneshot::Receiver<()>, ) -> Result<(), SubscriptionManagementError> @@ -612,7 +619,7 @@ where NotificationType::NewBlock(notification) => self.handle_import_blocks(notification, &startup_point), NotificationType::Finalized(notification) => - self.handle_finalized_blocks(notification, &mut to_ignore, &startup_point), + self.handle_finalized_blocks(notification, &startup_point), NotificationType::MethodResponse(notification) => Ok(vec![notification]), }; @@ -682,7 +689,7 @@ where .map(|response| NotificationType::MethodResponse(response)); let startup_point = StartupPoint::from(self.client.info()); - let (initial_events, pruned_forks) = match self.generate_init_events(&startup_point) { + let initial_events = match self.generate_init_events(&startup_point) { Ok(blocks) => blocks, Err(err) => { debug!( @@ -702,7 +709,6 @@ where let merged = tokio_stream::StreamExt::merge(merged, stream_responses); let stream = stream::once(futures::future::ready(initial)).chain(merged); - self.submit_events(&startup_point, stream.boxed(), pruned_forks, sink, sub_data.rx_stop) - .await + self.submit_events(&startup_point, stream.boxed(), sink, sub_data.rx_stop).await } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c2bff7c50d5e..14f664858a0d 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -187,6 +187,62 @@ async fn setup_api() -> ( (client, api, sub, sub_id, block) } +async fn import_block( + mut client: Arc>, + parent_hash: ::Hash, + parent_number: u64, +) -> Block { + let block = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(parent_number) + .build() + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + block +} + +async fn import_best_block_with_tx( + mut client: Arc>, + parent_hash: ::Hash, + parent_number: u64, + tx: Transfer, +) -> Block { + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .with_parent_block_number(parent_number) + .build() + .unwrap(); + block_builder.push_transfer(tx).unwrap(); + let block = block_builder.build().unwrap().block; + client.import_as_best(BlockOrigin::Own, block.clone()).await.unwrap(); + block +} + +/// Check the subscription produces a new block and a best block event. +/// +/// The macro is used instead of a fn to preserve the lines of code in case of panics. +macro_rules! 
check_new_and_best_block_events { + ($sub:expr, $block_hash:expr, $parent_hash:expr) => { + let event: FollowEvent = get_next_event($sub).await; + let expected = FollowEvent::NewBlock(NewBlock { + block_hash: format!("{:?}", $block_hash), + parent_block_hash: format!("{:?}", $parent_hash), + new_runtime: None, + with_runtime: false, + }); + assert_eq!(event, expected); + + let event: FollowEvent = get_next_event($sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", $block_hash), + }); + assert_eq!(event, expected); + }; +} + #[tokio::test] async fn follow_subscription_produces_blocks() { let builder = TestClientBuilder::new(); @@ -3644,3 +3700,160 @@ async fn chain_head_limit_reached() { // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; } + +#[tokio::test] +async fn follow_unique_pruned_blocks() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + max_follow_subscriptions_per_connection: MAX_FOLLOW_SUBSCRIPTIONS_PER_CONNECTION, + max_lagging_distance: MAX_LAGGING_DISTANCE, + }, + ) + .into_rpc(); + + let finalized_hash = client.info().finalized_hash; + let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + + // Initialized must always be reported first. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![format!("{:?}", finalized_hash)], + finalized_block_runtime: None, + with_runtime: false, + }); + assert_eq!(event, expected); + + // Block tree: + // + // finalized -> block 1 -> block 2 -> block 3 + // + // -> block 2 -> block 4 -> block 5 + // + // -> block 1 -> block 2_f -> block 6 + // ^^^ finalized + // -> block 7 + // ^^^ finalized + // -> block 8 + // ^^^ finalized + // The chainHead will see block 5 as the best block. However, the + // client will finalize the block 6, which is on another fork. + // + // When the block 6 is finalized, block 2 block 3 block 4 and block 5 are placed on an invalid + // fork. However, pruning of blocks happens on level N - 1. + // Therefore, no pruned blocks are reported yet. + // + // When the block 7 is finalized, block 3 is detected as stale. At this step, block 2 and 3 + // are reported as pruned. + // + // When the block 8 is finalized, block 5 block 4 and block 2 are detected as stale. However, + // only blocks 5 and 4 are reported as pruned. This is because the block 2 was previously + // reported. + + // Initial setup steps: + let block_1_hash = + import_block(client.clone(), client.chain_info().genesis_hash, 0).await.hash(); + let block_2_f_hash = import_block(client.clone(), block_1_hash, 1).await.hash(); + let block_6_hash = import_block(client.clone(), block_2_f_hash, 2).await.hash(); + // Import block 2 as best on the fork. 
+ let mut tx_alice_ferdie = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }; + let block_2_hash = + import_best_block_with_tx(client.clone(), block_1_hash, 1, tx_alice_ferdie.clone()) + .await + .hash(); + + let block_3_hash = import_block(client.clone(), block_2_hash, 2).await.hash(); + // Fork block 4. + tx_alice_ferdie.nonce = 1; + let block_4_hash = import_best_block_with_tx(client.clone(), block_2_hash, 2, tx_alice_ferdie) + .await + .hash(); + let block_5_hash = import_block(client.clone(), block_4_hash, 3).await.hash(); + + // Check expected events generated by the setup. + { + // Check block 1 -> block 2f -> block 6. + check_new_and_best_block_events!(&mut sub, block_1_hash, finalized_hash); + check_new_and_best_block_events!(&mut sub, block_2_f_hash, block_1_hash); + check_new_and_best_block_events!(&mut sub, block_6_hash, block_2_f_hash); + + // Check (block 1 ->) block 2 -> block 3. + check_new_and_best_block_events!(&mut sub, block_2_hash, block_1_hash); + check_new_and_best_block_events!(&mut sub, block_3_hash, block_2_hash); + + // Check (block 1 -> block 2 ->) block 4 -> block 5. + check_new_and_best_block_events!(&mut sub, block_4_hash, block_2_hash); + check_new_and_best_block_events!(&mut sub, block_5_hash, block_4_hash); + } + + // Finalize the block 6 from the fork. + client.finalize_block(block_6_hash, None).unwrap(); + + // Expect to report the best block changed before the finalized event. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: format!("{:?}", block_6_hash), + }); + assert_eq!(event, expected); + + // Block 2 must be reported as pruned, even if it was the previous best. + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![ + format!("{:?}", block_1_hash), + format!("{:?}", block_2_f_hash), + format!("{:?}", block_6_hash), + ], + pruned_block_hashes: vec![], + }); + assert_eq!(event, expected); + + // Pruned hash can be unpinned. + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + let hash = format!("{:?}", block_2_hash); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + + // Import block 7 and check it. + let block_7_hash = import_block(client.clone(), block_6_hash, 3).await.hash(); + check_new_and_best_block_events!(&mut sub, block_7_hash, block_6_hash); + + // Finalize the block 7. + client.finalize_block(block_7_hash, None).unwrap(); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![format!("{:?}", block_7_hash)], + pruned_block_hashes: vec![format!("{:?}", block_2_hash), format!("{:?}", block_3_hash)], + }); + assert_eq!(event, expected); + + // Check block 8. + let block_8_hash = import_block(client.clone(), block_7_hash, 4).await.hash(); + check_new_and_best_block_events!(&mut sub, block_8_hash, block_7_hash); + + // Finalize the block 8. 
+ client.finalize_block(block_8_hash, None).unwrap(); + + let event: FollowEvent = get_next_event(&mut sub).await; + let expected = FollowEvent::Finalized(Finalized { + finalized_block_hashes: vec![format!("{:?}", block_8_hash)], + pruned_block_hashes: vec![format!("{:?}", block_4_hash), format!("{:?}", block_5_hash)], + }); + assert_eq!(event, expected); +} From 7a2c9d4a9ae495e9165975da234ff4b67f5bfeb4 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 17 Apr 2024 18:52:00 +0300 Subject: [PATCH 03/74] Fix nostd build of several crates (#4060) Preparation for https://github.com/paritytech/polkadot-sdk/pull/3935 Changes: - Add some `default-features = false` for the case that a crate and that dependency both support nostd builds. - Shuffle files around of some benchmarking-only crates. These conditionally disabled the `cfg_attr` for nostd and pulled in libstd. Example [here](https://github.com/ggwpez/zepter/pull/95). The actual logic is moved into a `inner.rs` to preserve nostd capability of the crate in case the benchmarking feature is disabled. - Add some `use sp_std::vec` where needed. - Remove some `optional = true` in cases where it was not optional. - Removed one superfluous `cfg_attr(not(feature = "std"), no_std..`. All in all this should be logical no-op. --------- Signed-off-by: Oliver Tale-Yazdi --- .../pallets/session-benchmarking/src/inner.rs | 42 + .../pallets/session-benchmarking/src/lib.rs | 34 +- .../assets/asset-hub-rococo/Cargo.toml | 10 +- .../assets/asset-hub-rococo/src/lib.rs | 19 - .../glutton/glutton-westend/Cargo.toml | 4 +- .../primitives/parachain-inherent/Cargo.toml | 6 +- polkadot/primitives/Cargo.toml | 3 +- polkadot/runtime/parachains/Cargo.toml | 3 +- .../xcm-executor/integration-tests/src/lib.rs | 1 - prdoc/pr_4060.prdoc | 54 ++ substrate/frame/Cargo.toml | 2 +- substrate/frame/atomic-swap/src/lib.rs | 1 + .../benchmarking/src/inner.rs | 89 ++ .../benchmarking/src/lib.rs | 74 +- substrate/frame/examples/dev-mode/src/lib.rs | 1 + .../frame/examples/offchain-worker/Cargo.toml | 2 +- substrate/frame/indices/Cargo.toml | 3 +- substrate/frame/nomination-pools/Cargo.toml | 4 +- .../benchmarking/src/inner.rs | 846 ++++++++++++++++++ .../nomination-pools/benchmarking/src/lib.rs | 835 +---------------- .../frame/offences/benchmarking/src/inner.rs | 250 ++++++ .../frame/offences/benchmarking/src/lib.rs | 238 +---- .../frame/offences/benchmarking/src/mock.rs | 5 +- substrate/frame/root-offences/Cargo.toml | 5 +- substrate/frame/root-offences/src/lib.rs | 5 +- .../frame/session/benchmarking/src/inner.rs | 162 ++++ .../frame/session/benchmarking/src/lib.rs | 152 +--- substrate/frame/src/lib.rs | 4 +- .../frame/system/benchmarking/src/inner.rs | 230 +++++ .../frame/system/benchmarking/src/lib.rs | 220 +---- substrate/frame/try-runtime/src/inner.rs | 50 ++ substrate/frame/try-runtime/src/lib.rs | 35 +- .../primitives/consensus/babe/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/session/Cargo.toml | 4 +- .../transaction-storage-proof/Cargo.toml | 4 +- 36 files changed, 1800 insertions(+), 1601 deletions(-) create mode 100644 cumulus/pallets/session-benchmarking/src/inner.rs create mode 100644 prdoc/pr_4060.prdoc create mode 100644 substrate/frame/election-provider-support/benchmarking/src/inner.rs create mode 100644 substrate/frame/nomination-pools/benchmarking/src/inner.rs create mode 100644 substrate/frame/offences/benchmarking/src/inner.rs create mode 100644 substrate/frame/session/benchmarking/src/inner.rs create mode 100644 
substrate/frame/system/benchmarking/src/inner.rs create mode 100644 substrate/frame/try-runtime/src/inner.rs diff --git a/cumulus/pallets/session-benchmarking/src/inner.rs b/cumulus/pallets/session-benchmarking/src/inner.rs new file mode 100644 index 000000000000..cffd0776f3d9 --- /dev/null +++ b/cumulus/pallets/session-benchmarking/src/inner.rs @@ -0,0 +1,42 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking setup for pallet-session. + +use sp_std::{prelude::*, vec}; + +use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_system::RawOrigin; +use pallet_session::*; +use parity_scale_codec::Decode; +pub struct Pallet(pallet_session::Pallet); +pub trait Config: pallet_session::Config {} + +benchmarks! { + set_keys { + let caller: T::AccountId = whitelisted_caller(); + frame_system::Pallet::::inc_providers(&caller); + let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + }: _(RawOrigin::Signed(caller), keys, proof) + + purge_keys { + let caller: T::AccountId = whitelisted_caller(); + frame_system::Pallet::::inc_providers(&caller); + let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); + let proof: Vec = vec![0,1,2,3]; + let _t = pallet_session::Pallet::::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof); + }: _(RawOrigin::Signed(caller)) +} diff --git a/cumulus/pallets/session-benchmarking/src/lib.rs b/cumulus/pallets/session-benchmarking/src/lib.rs index f474def6b137..a95d6fb7d591 100644 --- a/cumulus/pallets/session-benchmarking/src/lib.rs +++ b/cumulus/pallets/session-benchmarking/src/lib.rs @@ -1,3 +1,5 @@ +// This file is part of Substrate. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -13,31 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Benchmarking setup for pallet-session -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "runtime-benchmarks")] -use sp_std::{prelude::*, vec}; +//! Benchmarks for the Session Pallet. +// This is separated into its own crate due to cyclic dependency issues. -use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_system::RawOrigin; -use pallet_session::*; -use parity_scale_codec::Decode; -pub struct Pallet(pallet_session::Pallet); -pub trait Config: pallet_session::Config {} +#![cfg_attr(not(feature = "std"), no_std)] -benchmarks! 
{ - set_keys { - let caller: T::AccountId = whitelisted_caller(); - frame_system::Pallet::::inc_providers(&caller); - let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - }: _(RawOrigin::Signed(caller), keys, proof) +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; - purge_keys { - let caller: T::AccountId = whitelisted_caller(); - frame_system::Pallet::::inc_providers(&caller); - let keys = T::Keys::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - let _t = pallet_session::Pallet::::set_keys(RawOrigin::Signed(caller.clone()).into(), keys, proof); - }: _(RawOrigin::Signed(caller)) -} +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index e8be734214f4..47574783810a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -37,7 +37,7 @@ pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } +pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } @@ -102,14 +102,6 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", [features] default = ["std"] -# When enabled the `state_version` is set to `1`. -# This means that the chain will start using the new state format. The migration is lazy, so -# it requires to write a storage value to use the new state format. To migrate all the other -# storage values that aren't touched the state migration pallet is added as well. -# This pallet will migrate the entire state, controlled through some account. -# -# This feature should be removed when the main-net will be migrated. -state-trie-version-1 = ["pallet-state-trie-migration"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 30e211a8f1d0..5cb29343a1cf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -107,7 +107,6 @@ impl_opaque_keys! 
{ } } -#[cfg(feature = "state-trie-version-1")] #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), @@ -120,19 +119,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 1, }; -#[cfg(not(feature = "state-trie-version-1"))] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 1_010_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 14, - state_version: 0, -}; - /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { @@ -953,7 +939,6 @@ construct_runtime!( PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, - #[cfg(feature = "state-trie-version-1")] StateTrieMigration: pallet_state_trie_migration = 70, // TODO: the pallet instance should be removed once all pools have migrated @@ -1695,7 +1680,6 @@ cumulus_pallet_parachain_system::register_validate_block! { BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } -#[cfg(feature = "state-trie-version-1")] parameter_types! { // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) pub const MigrationSignedDepositPerItem: Balance = CENTS; @@ -1703,7 +1687,6 @@ parameter_types! { pub const MigrationMaxKeyLen: u32 = 512; } -#[cfg(feature = "state-trie-version-1")] impl pallet_state_trie_migration::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; @@ -1721,13 +1704,11 @@ impl pallet_state_trie_migration::Config for Runtime { type MaxKeyLen = MigrationMaxKeyLen; } -#[cfg(feature = "state-trie-version-1")] frame_support::ord_parameter_types! 
{ pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); } -#[cfg(feature = "state-trie-version-1")] #[test] fn ensure_key_ss58() { use frame_support::traits::SortedMembers; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index fe9cd25841bf..808bed387327 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -22,8 +22,8 @@ frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/r frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true } -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true } +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index fcf4c93bc2f0..4da561661b6b 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -17,8 +17,8 @@ scale-info = { version = "2.11.1", default-features = false, features = ["derive # Substrate sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true } -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true } +sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true, default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true, default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } @@ -34,6 +34,8 @@ std = [ "scale-info/std", "sp-core/std", "sp-inherents/std", + "sp-runtime?/std", + "sp-state-machine?/std", "sp-std/std", "sp-trie/std", ] diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 004fa62acf34..99800afc37fe 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -26,7 +26,7 @@ sp-arithmetic = { path = "../../substrate/primitives/arithmetic", default-featur sp-authority-discovery = { path = "../../substrate/primitives/authority-discovery", default-features = false, 
features = ["serde"] } sp-consensus-slots = { path = "../../substrate/primitives/consensus/slots", default-features = false, features = ["serde"] } sp-io = { path = "../../substrate/primitives/io", default-features = false } -sp-keystore = { path = "../../substrate/primitives/keystore", optional = true } +sp-keystore = { path = "../../substrate/primitives/keystore", optional = true, default-features = false } sp-staking = { path = "../../substrate/primitives/staking", default-features = false, features = ["serde"] } sp-std = { package = "sp-std", path = "../../substrate/primitives/std", default-features = false } @@ -53,6 +53,7 @@ std = [ "sp-consensus-slots/std", "sp-io/std", "sp-keystore", + "sp-keystore?/std", "sp-staking/std", "sp-std/std", ] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index dff8549f29f3..402c6e487a1f 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-session = { path = "../../../substrate/primitives/session", default-features = false } sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } -sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true } +sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true, default-features = false } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } sp-arithmetic = { path = "../../../substrate/primitives/arithmetic", default-features = false } @@ -108,6 +108,7 @@ std = [ "sp-core/std", "sp-io/std", "sp-keystore", + "sp-keystore?/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 1d1ee40d092c..279d7118f8cf 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#![cfg_attr(not(feature = "std"), no_std)] #![cfg(test)] use codec::Encode; diff --git a/prdoc/pr_4060.prdoc b/prdoc/pr_4060.prdoc new file mode 100644 index 000000000000..621620a44893 --- /dev/null +++ b/prdoc/pr_4060.prdoc @@ -0,0 +1,54 @@ +title: "Fix nostd build of several crates" + +doc: + - audience: Runtime Dev + description: | + Fixes feature and dependency configuration of several crate. This should allow for better no-std build capabilities. 
+ +crates: + - name: cumulus-pallet-session-benchmarking + bump: patch + - name: asset-hub-rococo-runtime + bump: patch + - name: glutton-westend-runtime + bump: patch + - name: cumulus-primitives-parachain-inherent + bump: patch + - name: polkadot-primitives + bump: patch + - name: polkadot-runtime-parachains + bump: patch + - name: xcm-executor-integration-tests + bump: patch + - name: pallet-atomic-swap + bump: patch + - name: pallet-election-provider-support-benchmarking + bump: patch + - name: pallet-dev-mode + bump: patch + - name: pallet-example-offchain-worker + bump: patch + - name: pallet-indices + bump: patch + - name: pallet-nomination-pools + bump: patch + - name: pallet-nomination-pools-benchmarking + bump: patch + - name: pallet-offences-benchmarking + bump: patch + - name: pallet-root-offences + bump: patch + - name: pallet-session-benchmarking + bump: patch + - name: frame-system-benchmarking + bump: patch + - name: sp-consensus-babe + bump: patch + - name: sp-consensus-babe + bump: patch + - name: sp-core + bump: patch + - name: sp-session + bump: patch + - name: sp-transaction-storage-proof + bump: patch diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 919d6d17ce8b..84bab86581ca 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true -description = "The single package to get you started with building frame pallets and runtimes" +description = "Experimental: The single package to get you started with building frame pallets and runtimes" publish = false [lints] diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs index 609903e67e3e..dc0300dc1a5c 100644 --- a/substrate/frame/atomic-swap/src/lib.rs +++ b/substrate/frame/atomic-swap/src/lib.rs @@ -58,6 +58,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeDebug; +use sp_std::vec::Vec; /// Pending atomic swap operation. #[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode, TypeInfo, MaxEncodedLen)] diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs new file mode 100644 index 000000000000..4722680cfcc1 --- /dev/null +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -0,0 +1,89 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Election provider support pallet benchmarking. +//! This is separated into its own crate to avoid bloating the size of the runtime. 
+ +use codec::Decode; +use frame_benchmarking::v1::benchmarks; +use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen}; +use sp_std::vec::Vec; + +pub struct Pallet(frame_system::Pallet); +pub trait Config: frame_system::Config {} + +const VOTERS: [u32; 2] = [1_000, 2_000]; +const TARGETS: [u32; 2] = [500, 1_000]; +const VOTES_PER_VOTER: [u32; 2] = [5, 16]; + +const SEED: u32 = 999; +fn set_up_voters_targets( + voters_len: u32, + targets_len: u32, + degree: usize, +) -> (Vec<(AccountId, u64, impl IntoIterator)>, Vec) { + // fill targets. + let mut targets = (0..targets_len) + .map(|i| frame_benchmarking::account::("Target", i, SEED)) + .collect::>(); + assert!(targets.len() > degree, "we should always have enough voters to fill"); + targets.truncate(degree); + + // fill voters. + let voters = (0..voters_len) + .map(|i| { + let voter = frame_benchmarking::account::("Voter", i, SEED); + (voter, 1_000, targets.clone()) + }) + .collect::>(); + + (voters, targets) +} + +benchmarks! { + phragmen { + // number of votes in snapshot. + let v in (VOTERS[0]) .. VOTERS[1]; + // number of targets in snapshot. + let t in (TARGETS[0]) .. TARGETS[1]; + // number of votes per voter (ie the degree). + let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; + + let (voters, targets) = set_up_voters_targets::(v, t, d as usize); + }: { + assert!( + SequentialPhragmen:: + ::solve(d as usize, targets, voters).is_ok() + ); + } + + phragmms { + // number of votes in snapshot. + let v in (VOTERS[0]) .. VOTERS[1]; + // number of targets in snapshot. + let t in (TARGETS[0]) .. TARGETS[1]; + // number of votes per voter (ie the degree). + let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; + + let (voters, targets) = set_up_voters_targets::(v, t, d as usize); + }: { + assert!( + PhragMMS:: + ::solve(d as usize, targets, voters).is_ok() + ); + } +} diff --git a/substrate/frame/election-provider-support/benchmarking/src/lib.rs b/substrate/frame/election-provider-support/benchmarking/src/lib.rs index 6c75aed0a911..78b226e52af6 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/lib.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/lib.rs @@ -16,77 +16,11 @@ // limitations under the License. //! Election provider support pallet benchmarking. -//! This is separated into its own crate to avoid bloating the size of the runtime. -#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -use codec::Decode; -use frame_benchmarking::v1::benchmarks; -use frame_election_provider_support::{NposSolver, PhragMMS, SequentialPhragmen}; -use sp_std::vec::Vec; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -pub struct Pallet(frame_system::Pallet); -pub trait Config: frame_system::Config {} - -const VOTERS: [u32; 2] = [1_000, 2_000]; -const TARGETS: [u32; 2] = [500, 1_000]; -const VOTES_PER_VOTER: [u32; 2] = [5, 16]; - -const SEED: u32 = 999; -fn set_up_voters_targets( - voters_len: u32, - targets_len: u32, - degree: usize, -) -> (Vec<(AccountId, u64, impl IntoIterator)>, Vec) { - // fill targets. - let mut targets = (0..targets_len) - .map(|i| frame_benchmarking::account::("Target", i, SEED)) - .collect::>(); - assert!(targets.len() > degree, "we should always have enough voters to fill"); - targets.truncate(degree); - - // fill voters. - let voters = (0..voters_len) - .map(|i| { - let voter = frame_benchmarking::account::("Voter", i, SEED); - (voter, 1_000, targets.clone()) - }) - .collect::>(); - - (voters, targets) -} - -benchmarks! 
{ - phragmen { - // number of votes in snapshot. - let v in (VOTERS[0]) .. VOTERS[1]; - // number of targets in snapshot. - let t in (TARGETS[0]) .. TARGETS[1]; - // number of votes per voter (ie the degree). - let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; - - let (voters, targets) = set_up_voters_targets::(v, t, d as usize); - }: { - assert!( - SequentialPhragmen:: - ::solve(d as usize, targets, voters).is_ok() - ); - } - - phragmms { - // number of votes in snapshot. - let v in (VOTERS[0]) .. VOTERS[1]; - // number of targets in snapshot. - let t in (TARGETS[0]) .. TARGETS[1]; - // number of votes per voter (ie the degree). - let d in (VOTES_PER_VOTER[0]) .. VOTES_PER_VOTER[1]; - - let (voters, targets) = set_up_voters_targets::(v, t, d as usize); - }: { - assert!( - PhragMMS:: - ::solve(d as usize, targets, voters).is_ok() - ); - } -} +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; diff --git a/substrate/frame/examples/dev-mode/src/lib.rs b/substrate/frame/examples/dev-mode/src/lib.rs index d57e7a5b76b8..15f1a4b5d619 100644 --- a/substrate/frame/examples/dev-mode/src/lib.rs +++ b/substrate/frame/examples/dev-mode/src/lib.rs @@ -30,6 +30,7 @@ use frame_support::dispatch::DispatchResult; use frame_system::ensure_signed; +use sp_std::{vec, vec::Vec}; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index 468af0345cae..9363f7533526 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -24,7 +24,7 @@ frame-support = { path = "../../support", default-features = false } frame-system = { path = "../../system", default-features = false } sp-core = { path = "../../../primitives/core", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } -sp-keystore = { path = "../../../primitives/keystore", optional = true } +sp-keystore = { path = "../../../primitives/keystore", optional = true, default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index 7b14bf358f1e..8684f347270f 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -23,7 +23,7 @@ frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-keyring = { path = "../../primitives/keyring", optional = true, default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } @@ -42,6 +42,7 @@ std = [ "sp-core/std", "sp-io/std", "sp-keyring", + "sp-keyring?/std", "sp-runtime/std", "sp-std/std", ] diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 55e9ef6fbd33..eddcc8e4e1dd 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -34,8 +34,8 @@ sp-io = { path = "../../primitives/io", default-features = false } log = { 
workspace = true } # Optional: use for testing and/or fuzzing -pallet-balances = { path = "../balances", optional = true } -sp-tracing = { path = "../../primitives/tracing", optional = true } +pallet-balances = { path = "../balances", optional = true, default-features = false } +sp-tracing = { path = "../../primitives/tracing", optional = true, default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } diff --git a/substrate/frame/nomination-pools/benchmarking/src/inner.rs b/substrate/frame/nomination-pools/benchmarking/src/inner.rs new file mode 100644 index 000000000000..277060e7f640 --- /dev/null +++ b/substrate/frame/nomination-pools/benchmarking/src/inner.rs @@ -0,0 +1,846 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the nomination pools coupled with the staking and bags list pallets. + +use frame_benchmarking::v1::{account, whitelist_account}; +use frame_election_provider_support::SortedListProvider; +use frame_support::{ + assert_ok, ensure, + traits::{ + fungible::{Inspect, Mutate, Unbalanced}, + Get, + }, +}; +use frame_system::RawOrigin as RuntimeOrigin; +use pallet_nomination_pools::{ + BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, + Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, + MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, + Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, +}; +use pallet_staking::MaxNominationsOf; +use sp_runtime::{ + traits::{Bounded, StaticLookup, Zero}, + Perbill, +}; +use sp_staking::{EraIndex, StakingInterface}; +use sp_std::{vec, vec::Vec}; +// `frame_benchmarking::benchmarks!` macro needs this +use pallet_nomination_pools::Call; + +type CurrencyOf = ::Currency; + +const USER_SEED: u32 = 0; +const MAX_SPANS: u32 = 100; + +pub(crate) type VoterBagsListInstance = pallet_bags_list::Instance1; +pub trait Config: + pallet_nomination_pools::Config + + pallet_staking::Config + + pallet_bags_list::Config +{ +} + +pub struct Pallet(Pools); + +fn create_funded_user_with_balance( + string: &'static str, + n: u32, + balance: BalanceOf, +) -> T::AccountId { + let user = account(string, n, USER_SEED); + T::Currency::set_balance(&user, balance); + user +} + +// Create a bonded pool account, bonding `balance` and giving the account `balance * 2` free +// balance. 
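+// Returns the creator's account id together with the pool's bonded account. When
+// `commission` is `Some`, it is applied right after creation (the creator holds the pool's
+// root role, so `set_commission` is expected to succeed).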
+fn create_pool_account( + n: u32, + balance: BalanceOf, + commission: Option, +) -> (T::AccountId, T::AccountId) { + let ed = CurrencyOf::::minimum_balance(); + let pool_creator: T::AccountId = + create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); + let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); + + Pools::::create( + RuntimeOrigin::Signed(pool_creator.clone()).into(), + balance, + pool_creator_lookup.clone(), + pool_creator_lookup.clone(), + pool_creator_lookup, + ) + .unwrap(); + + if let Some(c) = commission { + let pool_id = pallet_nomination_pools::LastPoolId::::get(); + Pools::::set_commission( + RuntimeOrigin::Signed(pool_creator.clone()).into(), + pool_id, + Some((c, pool_creator.clone())), + ) + .expect("pool just created, commission can be set by root; qed"); + } + + let pool_account = pallet_nomination_pools::BondedPools::::iter() + .find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator) + .map(|(pool_id, _)| Pools::::create_bonded_account(pool_id)) + .expect("pool_creator created a pool above"); + + (pool_creator, pool_account) +} + +fn vote_to_balance( + vote: u64, +) -> Result, &'static str> { + vote.try_into().map_err(|_| "could not convert u64 to Balance") +} + +#[allow(unused)] +struct ListScenario { + /// Stash/Controller that is expected to be moved. + origin1: T::AccountId, + creator1: T::AccountId, + dest_weight: BalanceOf, + origin1_member: Option, +} + +impl ListScenario { + /// An expensive scenario for bags-list implementation: + /// + /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag + /// itself will need to be read and written to update its head. The node pointed to by r.next + /// will need to be read and written as it will need to have its prev pointer updated. Note + /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and + /// 2) the node is a middle node with prev and next; all scenarios end up with the same number + /// of storage reads and writes. + /// + /// - the destination bag has at least one node, which will need its next pointer updated. + pub(crate) fn new( + origin_weight: BalanceOf, + is_increase: bool, + ) -> Result { + ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); + + ensure!( + pallet_nomination_pools::MaxPools::::get().unwrap_or(0) >= 3, + "must allow at least three pools for benchmarks" + ); + + // Burn the entire issuance. + CurrencyOf::::set_total_issuance(Zero::zero()); + + // Create accounts with the origin weight + let (pool_creator1, pool_origin1) = + create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate( + &pool_origin1, + // NOTE: these don't really need to be validators. 
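+			// (No election ever runs in these benchmarks, so recording the nomination is
+			// all that is needed.)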
+ vec![account("random_validator", 0, USER_SEED)], + )?; + + let (_, pool_origin2) = + create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate( + &pool_origin2, + vec![account("random_validator", 0, USER_SEED)].clone(), + )?; + + // Find a destination weight that will trigger the worst case scenario + let dest_weight_as_vote = ::VoterList::score_update_worst_case( + &pool_origin1, + is_increase, + ); + + let dest_weight: BalanceOf = + dest_weight_as_vote.try_into().map_err(|_| "could not convert u64 to Balance")?; + + // Create an account with the worst case destination weight + let (_, pool_dest1) = + create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); + + T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; + + let weight_of = pallet_staking::Pallet::::weight_of_fn(); + assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); + assert_eq!(vote_to_balance::(weight_of(&pool_origin2)).unwrap(), origin_weight); + assert_eq!(vote_to_balance::(weight_of(&pool_dest1)).unwrap(), dest_weight); + + Ok(ListScenario { + origin1: pool_origin1, + creator1: pool_creator1, + dest_weight, + origin1_member: None, + }) + } + + fn add_joiner(mut self, amount: BalanceOf) -> Self { + let amount = MinJoinBond::::get() + .max(CurrencyOf::::minimum_balance()) + // Max `amount` with minimum thresholds for account balance and joining a pool + // to ensure 1) the user can be created and 2) can join the pool + .max(amount); + + let joiner: T::AccountId = account("joiner", USER_SEED, 0); + self.origin1_member = Some(joiner.clone()); + CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); + + let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); + + // Unbond `amount` from the underlying pool account so when the member joins + // we will maintain `current_bonded`. + T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); + + // Account pool points for the unbonded balance. + BondedPools::::mutate(&1, |maybe_pool| { + maybe_pool.as_mut().map(|pool| pool.points -= amount) + }); + + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); + + // check that the vote weight is still the same as the original bonded + let weight_of = pallet_staking::Pallet::::weight_of_fn(); + assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); + + // check the member was added correctly + let member = PoolMembers::::get(&joiner).unwrap(); + assert_eq!(member.points, amount); + assert_eq!(member.pool_id, 1); + + self + } +} + +frame_benchmarking::benchmarks! { + join { + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + + // setup the worst case list scenario. 
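+		// `true` = look for the worst-case *increase*: the origin pool sits at the head of
+		// its bag and must be re-bagged into a heavier destination bag.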
+ let scenario = ListScenario::::new(origin_weight, true)?; + assert_eq!( + T::Staking::active_stake(&scenario.origin1).unwrap(), + origin_weight + ); + + let max_additional = scenario.dest_weight - origin_weight; + let joiner_free = CurrencyOf::::minimum_balance() + max_additional; + + let joiner: T::AccountId + = create_funded_user_with_balance::("joiner", 0, joiner_free); + + whitelist_account!(joiner); + }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) + verify { + assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); + assert_eq!( + T::Staking::active_stake(&scenario.origin1).unwrap(), + scenario.dest_weight + ); + } + + bond_extra_transfer { + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let scenario = ListScenario::::new(origin_weight, true)?; + let extra = scenario.dest_weight - origin_weight; + + // creator of the src pool will bond-extra, bumping itself to dest bag. + + }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) + verify { + assert!( + T::Staking::active_stake(&scenario.origin1).unwrap() >= + scenario.dest_weight + ); + } + + bond_extra_other { + let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); + + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let scenario = ListScenario::::new(origin_weight, true)?; + let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); + + // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); + + // transfer exactly `extra` to the depositor of the src pool (1), + let reward_account1 = Pools::::create_reward_account(1); + assert!(extra >= CurrencyOf::::minimum_balance()); + let _ = CurrencyOf::::mint_into(&reward_account1, extra); + + }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) + verify { + // commission of 50% deducted here. + assert!( + T::Staking::active_stake(&scenario.origin1).unwrap() >= + scenario.dest_weight / 2u32.into() + ); + } + + claim_payout { + let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); + let commission = Perbill::from_percent(50); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let ed = CurrencyOf::::minimum_balance(); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let reward_account = Pools::::create_reward_account(1); + + // Send funds to the reward account of the pool + CurrencyOf::::set_balance(&reward_account, ed + origin_weight); + + // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's + // behalf. + let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); + + // Sanity check + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + ); + + whitelist_account!(depositor); + }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) + verify { + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + commission * origin_weight + ); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); + } + + + unbond { + // The weight the nominator will start at. The value used here is expected to be + // significantly higher than the first position in a list (e.g. 
the first bag threshold). + let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); + let scenario = ListScenario::::new(origin_weight, false)?; + let amount = origin_weight - scenario.dest_weight; + + let scenario = scenario.add_joiner(amount); + let member_id = scenario.origin1_member.unwrap().clone(); + let member_id_lookup = T::Lookup::unlookup(member_id.clone()); + let all_points = PoolMembers::::get(&member_id).unwrap().points; + whitelist_account!(member_id); + }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) + verify { + let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); + // We at least went down to the destination bag + assert!(bonded_after <= scenario.dest_weight); + let member = PoolMembers::::get( + &member_id + ) + .unwrap(); + assert_eq!( + member.unbonding_eras.keys().cloned().collect::>(), + vec![0 + T::Staking::bonding_duration()] + ); + assert_eq!( + member.unbonding_eras.values().cloned().collect::>(), + vec![all_points] + ); + } + + pool_withdraw_unbonded { + let s in 0 .. MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Add a new member + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + + // Unbond the new member + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + // Set the current era + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + // Add `s` count of slashing spans to storage. + pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); + whitelist_account!(pool_account); + }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) + verify { + // The joiners funds didn't change + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + // The unlocking chunk was removed + assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); + } + + withdraw_unbonded_update { + let s in 0 .. 
MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Add a new member + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); + + // Unbond the new member + pallet_staking::CurrentEra::::put(0); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + + // Set the current era to ensure we can withdraw unbonded funds + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); + whitelist_account!(joiner); + }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) + verify { + assert_eq!( + CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() + ); + // The unlocking chunk was removed + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); + } + + withdraw_unbonded_kill { + let s in 0 .. MAX_SPANS; + + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // We set the pool to the destroying state so the depositor can leave + BondedPools::::try_mutate(&1, |maybe_bonded_pool| { + maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { + bonded_pool.state = PoolState::Destroying; + }) + }) + .unwrap(); + + // Unbond the creator + pallet_staking::CurrentEra::::put(0); + // Simulate some rewards so we can check if the rewards storage is cleaned up. We check this + // here to ensure the complete flow for destroying a pool works - the reward pool account + // should never exist by time the depositor withdraws so we test that it gets cleaned + // up when unbonding. 
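+		// Fetch the reward account up front; `verify` below asserts that it is reaped once
+		// the depositor has fully withdrawn.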
+ let reward_account = Pools::::create_reward_account(1); + assert!(frame_system::Account::::contains_key(&reward_account)); + Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); + + // Sanity check that unbond worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + Zero::zero() + ); + assert_eq!( + CurrencyOf::::balance(&pool_account), + min_create_bond + ); + assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); + + // Set the current era to ensure we can withdraw unbonded funds + pallet_staking::CurrentEra::::put(EraIndex::max_value()); + + // Some last checks that storage items we expect to get cleaned up are present + assert!(pallet_staking::Ledger::::contains_key(&pool_account)); + assert!(BondedPools::::contains_key(&1)); + assert!(SubPoolsStorage::::contains_key(&1)); + assert!(RewardPools::::contains_key(&1)); + assert!(PoolMembers::::contains_key(&depositor)); + assert!(frame_system::Account::::contains_key(&reward_account)); + + whitelist_account!(depositor); + }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) + verify { + // Pool removal worked + assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); + assert!(!BondedPools::::contains_key(&1)); + assert!(!SubPoolsStorage::::contains_key(&1)); + assert!(!RewardPools::::contains_key(&1)); + assert!(!PoolMembers::::contains_key(&depositor)); + assert!(!frame_system::Account::::contains_key(&pool_account)); + assert!(!frame_system::Account::::contains_key(&reward_account)); + + // Funds where transferred back correctly + assert_eq!( + CurrencyOf::::balance(&depositor), + // gets bond back + rewards collecting when unbonding + min_create_bond * 2u32.into() + CurrencyOf::::minimum_balance() + ); + } + + create { + let min_create_bond = Pools::::depositor_min_bond(); + let depositor: T::AccountId = account("depositor", USER_SEED, 0); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); + + // Give the depositor some balance to bond + CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); + + // Make sure no Pools exist at a pre-condition for our verify checks + assert_eq!(RewardPools::::count(), 0); + assert_eq!(BondedPools::::count(), 0); + + whitelist_account!(depositor); + }: _( + RuntimeOrigin::Signed(depositor.clone()), + min_create_bond, + depositor_lookup.clone(), + depositor_lookup.clone(), + depositor_lookup + ) + verify { + assert_eq!(RewardPools::::count(), 1); + assert_eq!(BondedPools::::count(), 1); + let (_, new_pool) = BondedPools::::iter().next().unwrap(); + assert_eq!( + new_pool, + BondedPoolInner { + commission: Commission::default(), + member_counter: 1, + points: min_create_bond, + roles: PoolRoles { + depositor: depositor.clone(), + root: Some(depositor.clone()), + nominator: Some(depositor.clone()), + bouncer: Some(depositor.clone()), + }, + state: PoolState::Open, + } + ); + assert_eq!( + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) + ); + } + + nominate { + let n in 1 .. MaxNominationsOf::::get(); + + // Create a pool + let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Create some accounts to nominate. 
For the sake of benchmarking they don't need to be + // actual validators + let validators: Vec<_> = (0..n) + .map(|i| account("stash", USER_SEED, i)) + .collect(); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) + verify { + assert_eq!(RewardPools::::count(), 1); + assert_eq!(BondedPools::::count(), 1); + let (_, new_pool) = BondedPools::::iter().next().unwrap(); + assert_eq!( + new_pool, + BondedPoolInner { + commission: Commission::default(), + member_counter: 1, + points: min_create_bond, + roles: PoolRoles { + depositor: depositor.clone(), + root: Some(depositor.clone()), + nominator: Some(depositor.clone()), + bouncer: Some(depositor.clone()), + }, + state: PoolState::Open, + } + ); + assert_eq!( + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) + ); + } + + set_state { + // Create a pool + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + BondedPools::::mutate(&1, |maybe_pool| { + // Force the pool into an invalid state + maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); + }); + + let caller = account("caller", 0, USER_SEED); + whitelist_account!(caller); + }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) + verify { + assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); + } + + set_metadata { + let n in 1 .. ::MaxMetadataLen::get(); + + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Create metadata of the max possible size + let metadata: Vec = (0..n).map(|_| 42).collect(); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) + verify { + assert_eq!(Metadata::::get(&1), metadata); + } + + set_configs { + }:_( + RuntimeOrigin::Root, + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(BalanceOf::::max_value()), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(u32::MAX), + ConfigOp::Set(Perbill::max_value()) + ) verify { + assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); + assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); + assert_eq!(MaxPools::::get(), Some(u32::MAX)); + assert_eq!(MaxPoolMembers::::get(), Some(u32::MAX)); + assert_eq!(MaxPoolMembersPerPool::::get(), Some(u32::MAX)); + assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); + } + + update_roles { + let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; + let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); + }:_( + RuntimeOrigin::Signed(root.clone()), + first_id, + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()), + ConfigOp::Set(random.clone()) + ) verify { + assert_eq!( + pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, + pallet_nomination_pools::PoolRoles { + depositor: root, + nominator: Some(random.clone()), + bouncer: Some(random.clone()), + root: Some(random), + }, + ) + } + + chill { + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Nominate with the pool. 
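+		// Fill the nominations to the maximum so `chill` has the largest possible set to
+		// clear.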
+ let validators: Vec<_> = (0..MaxNominationsOf::::get()) + .map(|i| account("stash", USER_SEED, i)) + .collect(); + + assert_ok!(T::Staking::nominate(&pool_account, validators)); + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1) + verify { + assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); + } + + set_commission { + // Create a pool - do not set a commission yet. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + // set a max commission + Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); + // set a change rate + Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into(), + }).unwrap(); + // set a claim permission to an account. + Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(depositor.clone())) + ).unwrap(); + + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) + verify { + assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(20), depositor.clone())), + max: Some(Perbill::from_percent(50)), + change_rate: Some(CommissionChangeRate { + max_increase: Perbill::from_percent(20), + min_delay: 0u32.into() + }), + throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + + set_commission_max { + // Create a pool, setting a commission that will update when max commission is set. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: Some((Perbill::from_percent(50), depositor)), + max: Some(Perbill::from_percent(50)), + change_rate: None, + throttle_from: Some(0u32.into()), + claim_permission: None, + }); + } + + set_commission_change_rate { + // Create a pool + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: Some(CommissionChangeRate { + max_increase: Perbill::from_percent(50), + min_delay: 1000u32.into(), + }), + throttle_from: Some(1_u32.into()), + claim_permission: None, + }); + } + + set_commission_claim_permission { + // Create a pool. 
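+		// No commission is configured for this pool; only the `claim_permission` field of
+		// `Commission` is exercised here.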
+ let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + + set_claim_permission { + // Create a pool + let min_create_bond = Pools::::depositor_min_bond(); + let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); + + // Join pool + let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); + let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) + .unwrap(); + + // Sanity check join worked + assert_eq!( + T::Staking::active_stake(&pool_account).unwrap(), + min_create_bond + min_join_bond + ); + }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) + verify { + assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); + } + + claim_commission { + let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); + let commission = Perbill::from_percent(50); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); + let ed = CurrencyOf::::minimum_balance(); + let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); + let reward_account = Pools::::create_reward_account(1); + CurrencyOf::::set_balance(&reward_account, ed + origin_weight); + + // member claims a payout to make some commission available. + let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); + // set a claim permission to an account. + let _ = Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(claimer)) + ); + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) + verify { + assert_eq!( + CurrencyOf::::balance(&depositor), + origin_weight + commission * origin_weight + ); + assert_eq!( + CurrencyOf::::balance(&reward_account), + ed + commission * origin_weight + ); + } + + adjust_pool_deposit { + // Create a pool + let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + + // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. + let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1)); + assert!(&Pools::::check_ed_imbalance().is_err()); + + whitelist_account!(depositor); + }:_(RuntimeOrigin::Signed(depositor), 1) + verify { + assert!(&Pools::::check_ed_imbalance().is_ok()); + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(), + crate::mock::Runtime + ); +} diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index f7df173ec04e..45e8f1f27e99 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -17,836 +17,13 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. 
-#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(test)] -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -use frame_benchmarking::v1::{account, whitelist_account}; -use frame_election_provider_support::SortedListProvider; -use frame_support::{ - assert_ok, ensure, - traits::{ - fungible::{Inspect, Mutate, Unbalanced}, - Get, - }, -}; -use frame_system::RawOrigin as RuntimeOrigin; -use pallet_nomination_pools::{ - BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, - Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, - MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, - Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, -}; -use pallet_staking::MaxNominationsOf; -use sp_runtime::{ - traits::{Bounded, StaticLookup, Zero}, - Perbill, -}; -use sp_staking::{EraIndex, StakingInterface}; -use sp_std::{vec, vec::Vec}; -// `frame_benchmarking::benchmarks!` macro needs this -use pallet_nomination_pools::Call; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -type CurrencyOf = ::Currency; - -const USER_SEED: u32 = 0; -const MAX_SPANS: u32 = 100; - -type VoterBagsListInstance = pallet_bags_list::Instance1; -pub trait Config: - pallet_nomination_pools::Config - + pallet_staking::Config - + pallet_bags_list::Config -{ -} - -pub struct Pallet(Pools); - -fn create_funded_user_with_balance( - string: &'static str, - n: u32, - balance: BalanceOf, -) -> T::AccountId { - let user = account(string, n, USER_SEED); - T::Currency::set_balance(&user, balance); - user -} - -// Create a bonded pool account, bonding `balance` and giving the account `balance * 2` free -// balance. -fn create_pool_account( - n: u32, - balance: BalanceOf, - commission: Option, -) -> (T::AccountId, T::AccountId) { - let ed = CurrencyOf::::minimum_balance(); - let pool_creator: T::AccountId = - create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); - let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); - - Pools::::create( - RuntimeOrigin::Signed(pool_creator.clone()).into(), - balance, - pool_creator_lookup.clone(), - pool_creator_lookup.clone(), - pool_creator_lookup, - ) - .unwrap(); - - if let Some(c) = commission { - let pool_id = pallet_nomination_pools::LastPoolId::::get(); - Pools::::set_commission( - RuntimeOrigin::Signed(pool_creator.clone()).into(), - pool_id, - Some((c, pool_creator.clone())), - ) - .expect("pool just created, commission can be set by root; qed"); - } - - let pool_account = pallet_nomination_pools::BondedPools::::iter() - .find(|(_, bonded_pool)| bonded_pool.roles.depositor == pool_creator) - .map(|(pool_id, _)| Pools::::create_bonded_account(pool_id)) - .expect("pool_creator created a pool above"); - - (pool_creator, pool_account) -} - -fn vote_to_balance( - vote: u64, -) -> Result, &'static str> { - vote.try_into().map_err(|_| "could not convert u64 to Balance") -} - -#[allow(unused)] -struct ListScenario { - /// Stash/Controller that is expected to be moved. - origin1: T::AccountId, - creator1: T::AccountId, - dest_weight: BalanceOf, - origin1_member: Option, -} - -impl ListScenario { - /// An expensive scenario for bags-list implementation: - /// - /// - the node to be updated (r) is the head of a bag that has at least one other node. The bag - /// itself will need to be read and written to update its head. 
The node pointed to by r.next - /// will need to be read and written as it will need to have its prev pointer updated. Note - /// that there are two other worst case scenarios for bag removal: 1) the node is a tail and - /// 2) the node is a middle node with prev and next; all scenarios end up with the same number - /// of storage reads and writes. - /// - /// - the destination bag has at least one node, which will need its next pointer updated. - pub(crate) fn new( - origin_weight: BalanceOf, - is_increase: bool, - ) -> Result { - ensure!(!origin_weight.is_zero(), "origin weight must be greater than 0"); - - ensure!( - pallet_nomination_pools::MaxPools::::get().unwrap_or(0) >= 3, - "must allow at least three pools for benchmarks" - ); - - // Burn the entire issuance. - CurrencyOf::::set_total_issuance(Zero::zero()); - - // Create accounts with the origin weight - let (pool_creator1, pool_origin1) = - create_pool_account::(USER_SEED + 1, origin_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate( - &pool_origin1, - // NOTE: these don't really need to be validators. - vec![account("random_validator", 0, USER_SEED)], - )?; - - let (_, pool_origin2) = - create_pool_account::(USER_SEED + 2, origin_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate( - &pool_origin2, - vec![account("random_validator", 0, USER_SEED)].clone(), - )?; - - // Find a destination weight that will trigger the worst case scenario - let dest_weight_as_vote = ::VoterList::score_update_worst_case( - &pool_origin1, - is_increase, - ); - - let dest_weight: BalanceOf = - dest_weight_as_vote.try_into().map_err(|_| "could not convert u64 to Balance")?; - - // Create an account with the worst case destination weight - let (_, pool_dest1) = - create_pool_account::(USER_SEED + 3, dest_weight, Some(Perbill::from_percent(50))); - - T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; - - let weight_of = pallet_staking::Pallet::::weight_of_fn(); - assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); - assert_eq!(vote_to_balance::(weight_of(&pool_origin2)).unwrap(), origin_weight); - assert_eq!(vote_to_balance::(weight_of(&pool_dest1)).unwrap(), dest_weight); - - Ok(ListScenario { - origin1: pool_origin1, - creator1: pool_creator1, - dest_weight, - origin1_member: None, - }) - } - - fn add_joiner(mut self, amount: BalanceOf) -> Self { - let amount = MinJoinBond::::get() - .max(CurrencyOf::::minimum_balance()) - // Max `amount` with minimum thresholds for account balance and joining a pool - // to ensure 1) the user can be created and 2) can join the pool - .max(amount); - - let joiner: T::AccountId = account("joiner", USER_SEED, 0); - self.origin1_member = Some(joiner.clone()); - CurrencyOf::::set_balance(&joiner, amount * 2u32.into()); - - let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); - - // Unbond `amount` from the underlying pool account so when the member joins - // we will maintain `current_bonded`. - T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); - - // Account pool points for the unbonded balance. 
- BondedPools::::mutate(&1, |maybe_pool| { - maybe_pool.as_mut().map(|pool| pool.points -= amount) - }); - - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); - - // check that the vote weight is still the same as the original bonded - let weight_of = pallet_staking::Pallet::::weight_of_fn(); - assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); - - // check the member was added correctly - let member = PoolMembers::::get(&joiner).unwrap(); - assert_eq!(member.points, amount); - assert_eq!(member.pool_id, 1); - - self - } -} - -frame_benchmarking::benchmarks! { - join { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - - // setup the worst case list scenario. - let scenario = ListScenario::::new(origin_weight, true)?; - assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), - origin_weight - ); - - let max_additional = scenario.dest_weight - origin_weight; - let joiner_free = CurrencyOf::::minimum_balance() + max_additional; - - let joiner: T::AccountId - = create_funded_user_with_balance::("joiner", 0, joiner_free); - - whitelist_account!(joiner); - }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) - verify { - assert_eq!(CurrencyOf::::balance(&joiner), joiner_free - max_additional); - assert_eq!( - T::Staking::active_stake(&scenario.origin1).unwrap(), - scenario.dest_weight - ); - } - - bond_extra_transfer { - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; - let extra = scenario.dest_weight - origin_weight; - - // creator of the src pool will bond-extra, bumping itself to dest bag. - - }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) - verify { - assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= - scenario.dest_weight - ); - } - - bond_extra_other { - let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); - - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let scenario = ListScenario::::new(origin_weight, true)?; - let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); - - // set claim preferences to `PermissionlessAll` to any account to bond extra on member's behalf. - let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(scenario.creator1.clone()).into(), ClaimPermission::PermissionlessAll); - - // transfer exactly `extra` to the depositor of the src pool (1), - let reward_account1 = Pools::::create_reward_account(1); - assert!(extra >= CurrencyOf::::minimum_balance()); - let _ = CurrencyOf::::mint_into(&reward_account1, extra); - - }: _(RuntimeOrigin::Signed(claimer), T::Lookup::unlookup(scenario.creator1.clone()), BondExtra::Rewards) - verify { - // commission of 50% deducted here. 
- assert!( - T::Staking::active_stake(&scenario.origin1).unwrap() >= - scenario.dest_weight / 2u32.into() - ); - } - - claim_payout { - let claimer: T::AccountId = account("claimer", USER_SEED + 4, 0); - let commission = Perbill::from_percent(50); - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); - - // Send funds to the reward account of the pool - CurrencyOf::::set_balance(&reward_account, ed + origin_weight); - - // set claim preferences to `PermissionlessAll` so any account can claim rewards on member's - // behalf. - let _ = Pools::::set_claim_permission(RuntimeOrigin::Signed(depositor.clone()).into(), ClaimPermission::PermissionlessAll); - - // Sanity check - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight - ); - - whitelist_account!(depositor); - }:claim_payout_other(RuntimeOrigin::Signed(claimer), depositor.clone()) - verify { - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight + commission * origin_weight - ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); - } - - - unbond { - // The weight the nominator will start at. The value used here is expected to be - // significantly higher than the first position in a list (e.g. the first bag threshold). - let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); - let scenario = ListScenario::::new(origin_weight, false)?; - let amount = origin_weight - scenario.dest_weight; - - let scenario = scenario.add_joiner(amount); - let member_id = scenario.origin1_member.unwrap().clone(); - let member_id_lookup = T::Lookup::unlookup(member_id.clone()); - let all_points = PoolMembers::::get(&member_id).unwrap().points; - whitelist_account!(member_id); - }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) - verify { - let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); - // We at least went down to the destination bag - assert!(bonded_after <= scenario.dest_weight); - let member = PoolMembers::::get( - &member_id - ) - .unwrap(); - assert_eq!( - member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::Staking::bonding_duration()] - ); - assert_eq!( - member.unbonding_eras.values().cloned().collect::>(), - vec![all_points] - ); - } - - pool_withdraw_unbonded { - let s in 0 .. 
MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Add a new member - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - - // Unbond the new member - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - // Set the current era - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - // Add `s` count of slashing spans to storage. - pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); - whitelist_account!(pool_account); - }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) - verify { - // The joiners funds didn't change - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - // The unlocking chunk was removed - assert_eq!(pallet_staking::Ledger::::get(pool_account).unwrap().unlocking.len(), 0); - } - - withdraw_unbonded_update { - let s in 0 .. MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Add a new member - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - assert_eq!(CurrencyOf::::balance(&joiner), min_join_bond); - - // Unbond the new member - pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - - // Set the current era to ensure we can withdraw unbonded funds - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); - whitelist_account!(joiner); - }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) - verify { - assert_eq!( - CurrencyOf::::balance(&joiner), min_join_bond * 2u32.into() - ); - // The unlocking chunk was removed - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 0); - } - - withdraw_unbonded_kill { - let s in 0 .. 
MAX_SPANS; - - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); - - // We set the pool to the destroying state so the depositor can leave - BondedPools::::try_mutate(&1, |maybe_bonded_pool| { - maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { - bonded_pool.state = PoolState::Destroying; - }) - }) - .unwrap(); - - // Unbond the creator - pallet_staking::CurrentEra::::put(0); - // Simulate some rewards so we can check if the rewards storage is cleaned up. We check this - // here to ensure the complete flow for destroying a pool works - the reward pool account - // should never exist by time the depositor withdraws so we test that it gets cleaned - // up when unbonding. - let reward_account = Pools::::create_reward_account(1); - assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); - - // Sanity check that unbond worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - Zero::zero() - ); - assert_eq!( - CurrencyOf::::balance(&pool_account), - min_create_bond - ); - assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); - - // Set the current era to ensure we can withdraw unbonded funds - pallet_staking::CurrentEra::::put(EraIndex::max_value()); - - // Some last checks that storage items we expect to get cleaned up are present - assert!(pallet_staking::Ledger::::contains_key(&pool_account)); - assert!(BondedPools::::contains_key(&1)); - assert!(SubPoolsStorage::::contains_key(&1)); - assert!(RewardPools::::contains_key(&1)); - assert!(PoolMembers::::contains_key(&depositor)); - assert!(frame_system::Account::::contains_key(&reward_account)); - - whitelist_account!(depositor); - }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) - verify { - // Pool removal worked - assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); - assert!(!BondedPools::::contains_key(&1)); - assert!(!SubPoolsStorage::::contains_key(&1)); - assert!(!RewardPools::::contains_key(&1)); - assert!(!PoolMembers::::contains_key(&depositor)); - assert!(!frame_system::Account::::contains_key(&pool_account)); - assert!(!frame_system::Account::::contains_key(&reward_account)); - - // Funds where transferred back correctly - assert_eq!( - CurrencyOf::::balance(&depositor), - // gets bond back + rewards collecting when unbonding - min_create_bond * 2u32.into() + CurrencyOf::::minimum_balance() - ); - } - - create { - let min_create_bond = Pools::::depositor_min_bond(); - let depositor: T::AccountId = account("depositor", USER_SEED, 0); - let depositor_lookup = T::Lookup::unlookup(depositor.clone()); - - // Give the depositor some balance to bond - CurrencyOf::::set_balance(&depositor, min_create_bond * 2u32.into()); - - // Make sure no Pools exist at a pre-condition for our verify checks - assert_eq!(RewardPools::::count(), 0); - assert_eq!(BondedPools::::count(), 0); - - whitelist_account!(depositor); - }: _( - RuntimeOrigin::Signed(depositor.clone()), - min_create_bond, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup - ) - verify { - assert_eq!(RewardPools::::count(), 1); - assert_eq!(BondedPools::::count(), 1); - let (_, new_pool) = BondedPools::::iter().next().unwrap(); - assert_eq!( - new_pool, - BondedPoolInner { - commission: 
Commission::default(), - member_counter: 1, - points: min_create_bond, - roles: PoolRoles { - depositor: depositor.clone(), - root: Some(depositor.clone()), - nominator: Some(depositor.clone()), - bouncer: Some(depositor.clone()), - }, - state: PoolState::Open, - } - ); - assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) - ); - } - - nominate { - let n in 1 .. MaxNominationsOf::::get(); - - // Create a pool - let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Create some accounts to nominate. For the sake of benchmarking they don't need to be - // actual validators - let validators: Vec<_> = (0..n) - .map(|i| account("stash", USER_SEED, i)) - .collect(); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) - verify { - assert_eq!(RewardPools::::count(), 1); - assert_eq!(BondedPools::::count(), 1); - let (_, new_pool) = BondedPools::::iter().next().unwrap(); - assert_eq!( - new_pool, - BondedPoolInner { - commission: Commission::default(), - member_counter: 1, - points: min_create_bond, - roles: PoolRoles { - depositor: depositor.clone(), - root: Some(depositor.clone()), - nominator: Some(depositor.clone()), - bouncer: Some(depositor.clone()), - }, - state: PoolState::Open, - } - ); - assert_eq!( - T::Staking::active_stake(&Pools::::create_bonded_account(1)), - Ok(min_create_bond) - ); - } - - set_state { - // Create a pool - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - BondedPools::::mutate(&1, |maybe_pool| { - // Force the pool into an invalid state - maybe_pool.as_mut().map(|pool| pool.points = min_create_bond * 10u32.into()); - }); - - let caller = account("caller", 0, USER_SEED); - whitelist_account!(caller); - }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) - verify { - assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); - } - - set_metadata { - let n in 1 .. 
::MaxMetadataLen::get(); - - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Create metadata of the max possible size - let metadata: Vec = (0..n).map(|_| 42).collect(); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) - verify { - assert_eq!(Metadata::::get(&1), metadata); - } - - set_configs { - }:_( - RuntimeOrigin::Root, - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(BalanceOf::::max_value()), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(u32::MAX), - ConfigOp::Set(Perbill::max_value()) - ) verify { - assert_eq!(MinJoinBond::::get(), BalanceOf::::max_value()); - assert_eq!(MinCreateBond::::get(), BalanceOf::::max_value()); - assert_eq!(MaxPools::::get(), Some(u32::MAX)); - assert_eq!(MaxPoolMembers::::get(), Some(u32::MAX)); - assert_eq!(MaxPoolMembersPerPool::::get(), Some(u32::MAX)); - assert_eq!(GlobalMaxCommission::::get(), Some(Perbill::max_value())); - } - - update_roles { - let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); - }:_( - RuntimeOrigin::Signed(root.clone()), - first_id, - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()), - ConfigOp::Set(random.clone()) - ) verify { - assert_eq!( - pallet_nomination_pools::BondedPools::::get(first_id).unwrap().roles, - pallet_nomination_pools::PoolRoles { - depositor: root, - nominator: Some(random.clone()), - bouncer: Some(random.clone()), - root: Some(random), - }, - ) - } - - chill { - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Nominate with the pool. - let validators: Vec<_> = (0..MaxNominationsOf::::get()) - .map(|i| account("stash", USER_SEED, i)) - .collect(); - - assert_ok!(T::Staking::nominate(&pool_account, validators)); - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_some()); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1) - verify { - assert!(T::Staking::nominations(&Pools::::create_bonded_account(1)).is_none()); - } - - set_commission { - // Create a pool - do not set a commission yet. - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - // set a max commission - Pools::::set_commission_max(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), Perbill::from_percent(50)).unwrap(); - // set a change rate - Pools::::set_commission_change_rate(RuntimeOrigin::Signed(depositor.clone()).into(), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into(), - }).unwrap(); - // set a claim permission to an account. 
- Pools::::set_commission_claim_permission( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - Some(CommissionClaimPermission::Account(depositor.clone())) - ).unwrap(); - - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) - verify { - assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(20), depositor.clone())), - max: Some(Perbill::from_percent(50)), - change_rate: Some(CommissionChangeRate { - max_increase: Perbill::from_percent(20), - min_delay: 0u32.into() - }), - throttle_from: Some(1u32.into()), - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); - } - - set_commission_max { - // Create a pool, setting a commission that will update when max commission is set. - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), Some(Perbill::from_percent(50))); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Perbill::from_percent(50)) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(50), depositor)), - max: Some(Perbill::from_percent(50)), - change_rate: None, - throttle_from: Some(0u32.into()), - claim_permission: None, - }); - } - - set_commission_change_rate { - // Create a pool - let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), CommissionChangeRate { - max_increase: Perbill::from_percent(50), - min_delay: 1000u32.into(), - }) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: Some(CommissionChangeRate { - max_increase: Perbill::from_percent(50), - min_delay: 1000u32.into(), - }), - throttle_from: Some(1_u32.into()), - claim_permission: None, - }); - } - - set_commission_claim_permission { - // Create a pool. 
- let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) - verify { - assert_eq!( - BondedPools::::get(1).unwrap().commission, Commission { - current: None, - max: None, - change_rate: None, - throttle_from: None, - claim_permission: Some(CommissionClaimPermission::Account(depositor)), - }); - } - - set_claim_permission { - // Create a pool - let min_create_bond = Pools::::depositor_min_bond(); - let (depositor, pool_account) = create_pool_account::(0, min_create_bond, None); - - // Join pool - let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); - let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 4u32.into()); - let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) - .unwrap(); - - // Sanity check join worked - assert_eq!( - T::Staking::active_stake(&pool_account).unwrap(), - min_create_bond + min_join_bond - ); - }:_(RuntimeOrigin::Signed(joiner.clone()), ClaimPermission::Permissioned) - verify { - assert_eq!(ClaimPermissions::::get(joiner), ClaimPermission::Permissioned); - } - - claim_commission { - let claimer: T::AccountId = account("claimer_member", USER_SEED + 4, 0); - let commission = Perbill::from_percent(50); - let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); - let ed = CurrencyOf::::minimum_balance(); - let (depositor, pool_account) = create_pool_account::(0, origin_weight, Some(commission)); - let reward_account = Pools::::create_reward_account(1); - CurrencyOf::::set_balance(&reward_account, ed + origin_weight); - - // member claims a payout to make some commission available. - let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); - // set a claim permission to an account. - let _ = Pools::::set_commission_claim_permission( - RuntimeOrigin::Signed(depositor.clone()).into(), - 1u32.into(), - Some(CommissionClaimPermission::Account(claimer)) - ); - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) - verify { - assert_eq!( - CurrencyOf::::balance(&depositor), - origin_weight + commission * origin_weight - ); - assert_eq!( - CurrencyOf::::balance(&reward_account), - ed + commission * origin_weight - ); - } - - adjust_pool_deposit { - // Create a pool - let (depositor, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); - - // Remove ed freeze to create a scenario where the ed deposit needs to be adjusted. - let _ = Pools::::unfreeze_pool_deposit(&Pools::::create_reward_account(1)); - assert!(&Pools::::check_ed_imbalance().is_err()); - - whitelist_account!(depositor); - }:_(RuntimeOrigin::Signed(depositor), 1) - verify { - assert!(&Pools::::check_ed_imbalance().is_ok()); - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Runtime - ); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs new file mode 100644 index 000000000000..9aa88f7a0d6d --- /dev/null +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -0,0 +1,250 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Offences pallet benchmarking.
+
+use sp_std::{prelude::*, vec};
+
+use frame_benchmarking::v1::{account, benchmarks};
+use frame_support::traits::{Currency, Get};
+use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin};
+
+use sp_runtime::{
+	traits::{Convert, Saturating, StaticLookup},
+	Perbill,
+};
+use sp_staking::offence::ReportOffence;
+
+use pallet_babe::EquivocationOffence as BabeEquivocationOffence;
+use pallet_balances::Config as BalancesConfig;
+use pallet_grandpa::{
+	EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot,
+};
+use pallet_offences::{Config as OffencesConfig, Pallet as Offences};
+use pallet_session::{
+	historical::{Config as HistoricalConfig, IdentificationTuple},
+	Config as SessionConfig, Pallet as Session, SessionManager,
+};
+use pallet_staking::{
+	Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking,
+	RewardDestination, ValidatorPrefs,
+};
+
+const SEED: u32 = 0;
+
+const MAX_NOMINATORS: u32 = 100;
+
+pub struct Pallet<T: Config>(Offences<T>);
+
+pub trait Config:
+	SessionConfig
+	+ StakingConfig
+	+ OffencesConfig
+	+ HistoricalConfig
+	+ BalancesConfig
+	+ IdTupleConvert<Self>
+{
+}
+
+/// A helper trait to make sure we can convert the `IdentificationTuple` coming from the
+/// `historical` pallet into the one required by the `offences` pallet.
+pub trait IdTupleConvert<T: HistoricalConfig + OffencesConfig> {
+	/// Convert the identification tuple from the `historical` trait to the one expected by
+	/// `offences`.
+	fn convert(id: IdentificationTuple<T>) -> <T as OffencesConfig>::IdentificationTuple;
+}
+
+impl<T: HistoricalConfig + OffencesConfig> IdTupleConvert<T> for T
+where
+	<T as OffencesConfig>::IdentificationTuple: From<IdentificationTuple<T>>,
+{
+	fn convert(id: IdentificationTuple<T>) -> <T as OffencesConfig>::IdentificationTuple {
+		id.into()
+	}
+}
+
+type LookupSourceOf<T> = <<T as SystemConfig>::Lookup as StaticLookup>::Source;
+type BalanceOf<T> =
+	<<T as StakingConfig>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
+
+struct Offender<T: SessionConfig + StakingConfig> {
+	pub controller: T::AccountId,
+	#[allow(dead_code)]
+	pub stash: T::AccountId,
+	#[allow(dead_code)]
+	pub nominator_stashes: Vec<T::AccountId>,
+}
+
+fn bond_amount<T: Config>() -> BalanceOf<T> {
+	T::Currency::minimum_balance().saturating_mul(10_000u32.into())
+}
+
+fn create_offender<T: Config>(n: u32, nominators: u32) -> Result<Offender<T>, &'static str> {
+	let stash: T::AccountId = account("stash", n, SEED);
+	let stash_lookup: LookupSourceOf<T> = T::Lookup::unlookup(stash.clone());
+	let reward_destination = RewardDestination::Staked;
+	let amount = bond_amount::<T>();
+	// add twice as much balance to prevent the account from being killed.
+ let free_amount = amount.saturating_mul(2u32.into()); + T::Currency::make_free_balance_be(&stash, free_amount); + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + amount, + reward_destination.clone(), + )?; + + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; + Staking::::validate(RawOrigin::Signed(stash.clone()).into(), validator_prefs)?; + + let mut individual_exposures = vec![]; + let mut nominator_stashes = vec![]; + // Create n nominators + for i in 0..nominators { + let nominator_stash: T::AccountId = + account("nominator stash", n * MAX_NOMINATORS + i, SEED); + T::Currency::make_free_balance_be(&nominator_stash, free_amount); + + Staking::::bond( + RawOrigin::Signed(nominator_stash.clone()).into(), + amount, + reward_destination.clone(), + )?; + + let selected_validators: Vec> = vec![stash_lookup.clone()]; + Staking::::nominate( + RawOrigin::Signed(nominator_stash.clone()).into(), + selected_validators, + )?; + + individual_exposures + .push(IndividualExposure { who: nominator_stash.clone(), value: amount }); + nominator_stashes.push(nominator_stash.clone()); + } + + let exposure = Exposure { total: amount * n.into(), own: amount, others: individual_exposures }; + let current_era = 0u32; + Staking::::add_era_stakers(current_era, stash.clone(), exposure); + + Ok(Offender { controller: stash.clone(), stash, nominator_stashes }) +} + +fn make_offenders( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { + Staking::::new_session(0); + + let mut offenders = vec![]; + for i in 0..num_offenders { + let offender = create_offender::(i + 1, num_nominators)?; + offenders.push(offender); + } + + Staking::::start_session(0); + + let id_tuples = offenders + .iter() + .map(|offender| { + ::ValidatorIdOf::convert(offender.controller.clone()) + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + ::FullIdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) + .collect::>>(); + Ok((id_tuples, offenders)) +} + +benchmarks! { + report_offence_grandpa { + let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); + + // for grandpa equivocation reports the number of reporters + // and offenders is always 1 + let reporters = vec![account("reporter", 1, SEED)]; + + // make sure reporters actually get rewarded + Staking::::set_slash_reward_fraction(Perbill::one()); + + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let validator_set_count = Session::::validators().len() as u32; + + let offence = GrandpaEquivocationOffence { + time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, + session_index: 0, + validator_set_count, + offender: T::convert(offenders.pop().unwrap()), + }; + assert_eq!(System::::event_count(), 0); + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { + // make sure that all slashes have been applied + #[cfg(test)] + assert_eq!( + System::::event_count(), 0 + + 1 // offence + + 3 // reporter (reward + endowment) + + 1 // offenders reported + + 3 // offenders slashed + + 1 // offenders chilled + + 3 * n // nominators slashed + ); + } + + report_offence_babe { + let n in 0 .. 
MAX_NOMINATORS.min(MaxNominationsOf::::get()); + + // for babe equivocation reports the number of reporters + // and offenders is always 1 + let reporters = vec![account("reporter", 1, SEED)]; + + // make sure reporters actually get rewarded + Staking::::set_slash_reward_fraction(Perbill::one()); + + let (mut offenders, raw_offenders) = make_offenders::(1, n)?; + let validator_set_count = Session::::validators().len() as u32; + + let offence = BabeEquivocationOffence { + slot: 0u64.into(), + session_index: 0, + validator_set_count, + offender: T::convert(offenders.pop().unwrap()), + }; + assert_eq!(System::::event_count(), 0); + }: { + let _ = Offences::::report_offence(reporters, offence); + } + verify { + // make sure that all slashes have been applied + #[cfg(test)] + assert_eq!( + System::::event_count(), 0 + + 1 // offence + + 3 // reporter (reward + endowment) + + 1 // offenders reported + + 3 // offenders slashed + + 1 // offenders chilled + + 3 * n // nominators slashed + ); + } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index 563aa4755cec..b08955a13329 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -17,239 +17,13 @@ //! Offences pallet benchmarking. -#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] -mod mock; +#[cfg(feature = "runtime-benchmarks")] +pub mod inner; -use sp_std::{prelude::*, vec}; +#[cfg(feature = "runtime-benchmarks")] +pub use inner::*; -use frame_benchmarking::v1::{account, benchmarks}; -use frame_support::traits::{Currency, Get}; -use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; - -use sp_runtime::{ - traits::{Convert, Saturating, StaticLookup}, - Perbill, -}; -use sp_staking::offence::ReportOffence; - -use pallet_babe::EquivocationOffence as BabeEquivocationOffence; -use pallet_balances::Config as BalancesConfig; -use pallet_grandpa::{ - EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot, -}; -use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; -use pallet_session::{ - historical::{Config as HistoricalConfig, IdentificationTuple}, - Config as SessionConfig, Pallet as Session, SessionManager, -}; -use pallet_staking::{ - Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, - RewardDestination, ValidatorPrefs, -}; - -const SEED: u32 = 0; - -const MAX_NOMINATORS: u32 = 100; - -pub struct Pallet(Offences); - -pub trait Config: - SessionConfig - + StakingConfig - + OffencesConfig - + HistoricalConfig - + BalancesConfig - + IdTupleConvert -{ -} - -/// A helper trait to make sure we can convert `IdentificationTuple` coming from historical -/// and the one required by offences. -pub trait IdTupleConvert { - /// Convert identification tuple from `historical` trait to the one expected by `offences`. 
- fn convert(id: IdentificationTuple) -> ::IdentificationTuple; -} - -impl IdTupleConvert for T -where - ::IdentificationTuple: From>, -{ - fn convert(id: IdentificationTuple) -> ::IdentificationTuple { - id.into() - } -} - -type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; - -struct Offender { - pub controller: T::AccountId, - #[allow(dead_code)] - pub stash: T::AccountId, - #[allow(dead_code)] - pub nominator_stashes: Vec, -} - -fn bond_amount() -> BalanceOf { - T::Currency::minimum_balance().saturating_mul(10_000u32.into()) -} - -fn create_offender(n: u32, nominators: u32) -> Result, &'static str> { - let stash: T::AccountId = account("stash", n, SEED); - let stash_lookup: LookupSourceOf = T::Lookup::unlookup(stash.clone()); - let reward_destination = RewardDestination::Staked; - let amount = bond_amount::(); - // add twice as much balance to prevent the account from being killed. - let free_amount = amount.saturating_mul(2u32.into()); - T::Currency::make_free_balance_be(&stash, free_amount); - Staking::::bond( - RawOrigin::Signed(stash.clone()).into(), - amount, - reward_destination.clone(), - )?; - - let validator_prefs = - ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; - Staking::::validate(RawOrigin::Signed(stash.clone()).into(), validator_prefs)?; - - let mut individual_exposures = vec![]; - let mut nominator_stashes = vec![]; - // Create n nominators - for i in 0..nominators { - let nominator_stash: T::AccountId = - account("nominator stash", n * MAX_NOMINATORS + i, SEED); - T::Currency::make_free_balance_be(&nominator_stash, free_amount); - - Staking::::bond( - RawOrigin::Signed(nominator_stash.clone()).into(), - amount, - reward_destination.clone(), - )?; - - let selected_validators: Vec> = vec![stash_lookup.clone()]; - Staking::::nominate( - RawOrigin::Signed(nominator_stash.clone()).into(), - selected_validators, - )?; - - individual_exposures - .push(IndividualExposure { who: nominator_stash.clone(), value: amount }); - nominator_stashes.push(nominator_stash.clone()); - } - - let exposure = Exposure { total: amount * n.into(), own: amount, others: individual_exposures }; - let current_era = 0u32; - Staking::::add_era_stakers(current_era, stash.clone(), exposure); - - Ok(Offender { controller: stash.clone(), stash, nominator_stashes }) -} - -fn make_offenders( - num_offenders: u32, - num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { - Staking::::new_session(0); - - let mut offenders = vec![]; - for i in 0..num_offenders { - let offender = create_offender::(i + 1, num_nominators)?; - offenders.push(offender); - } - - Staking::::start_session(0); - - let id_tuples = offenders - .iter() - .map(|offender| { - ::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id") - }) - .map(|validator_id| { - ::FullIdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification") - }) - .collect::>>(); - Ok((id_tuples, offenders)) -} - -benchmarks! { - report_offence_grandpa { - let n in 0 .. 
MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // for grandpa equivocation reports the number of reporters - // and offenders is always 1 - let reporters = vec![account("reporter", 1, SEED)]; - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let validator_set_count = Session::::validators().len() as u32; - - let offence = GrandpaEquivocationOffence { - time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, - session_index: 0, - validator_set_count, - offender: T::convert(offenders.pop().unwrap()), - }; - assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied - #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); - } - - report_offence_babe { - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // for babe equivocation reports the number of reporters - // and offenders is always 1 - let reporters = vec![account("reporter", 1, SEED)]; - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let validator_set_count = Session::::validators().len() as u32; - - let offence = BabeEquivocationOffence { - slot: 0u64.into(), - session_index: 0, - validator_set_count, - offender: T::convert(offenders.pop().unwrap()), - }; - assert_eq!(System::::event_count(), 0); - }: { - let _ = Offences::::report_offence(reporters, offence); - } - verify { - // make sure that all slashes have been applied - #[cfg(test)] - assert_eq!( - System::::event_count(), 0 - + 1 // offence - + 3 // reporter (reward + endowment) - + 1 // offenders reported - + 3 // offenders slashed - + 1 // offenders chilled - + 3 * n // nominators slashed - ); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index ea2e9e93ed68..27129e73c71e 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -17,9 +17,6 @@ //! Mock file for offences benchmarking. 
-#![cfg(test)] - -use super::*; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, @@ -33,7 +30,7 @@ use pallet_session::historical as pallet_session_historical; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::IdentityLookup, - BuildStorage, + BuildStorage, Perbill, }; type AccountId = u64; diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index ad3dcf1f90ea..f4d83c237b9c 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -24,7 +24,7 @@ pallet-staking = { path = "../staking", default-features = false } frame-support = { path = "../support", default-features = false } frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime" } +sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] @@ -34,7 +34,7 @@ pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false } +sp-std = { path = "../../primitives/std" } frame-election-provider-support = { path = "../election-provider-support" } @@ -74,5 +74,4 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-staking/std", - "sp-std/std", ] diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index e6bb5bb18819..24d259ed1d4a 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -27,6 +27,9 @@ mod mock; #[cfg(test)] mod tests; +extern crate alloc; + +use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; @@ -112,7 +115,7 @@ pub mod pallet { .into_iter() .map(|(o, _)| OffenceDetails:: { offender: (o.clone(), Staking::::eras_stakers(now, &o)), - reporters: vec![], + reporters: Default::default(), }) .collect()) } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs new file mode 100644 index 000000000000..d86c5d9ad278 --- /dev/null +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Session Pallet. +// This is separated into its own crate due to cyclic dependency issues. 
+
+use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput};
+use sp_std::{prelude::*, vec};
+
+use codec::Decode;
+use frame_benchmarking::v1::benchmarks;
+use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize};
+use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
+use pallet_session::{historical::Pallet as Historical, Pallet as Session, *};
+use pallet_staking::{
+	benchmarking::create_validator_with_nominators, testing_utils::create_validators,
+	MaxNominationsOf, RewardDestination,
+};
+
+const MAX_VALIDATORS: u32 = 1000;
+
+pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
+pub trait Config:
+	pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config
+{
+}
+
+impl<T: Config> OnInitialize<BlockNumberFor<T>> for Pallet<T> {
+	fn on_initialize(n: BlockNumberFor<T>) -> frame_support::weights::Weight {
+		pallet_session::Pallet::<T>::on_initialize(n)
+	}
+}
+
+benchmarks! {
+	set_keys {
+		let n = MaxNominationsOf::<T>::get();
+		let (v_stash, _) = create_validator_with_nominators::<T>(
+			n,
+			MaxNominationsOf::<T>::get(),
+			false,
+			true,
+			RewardDestination::Staked,
+		)?;
+		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
+
+		let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap();
+		let proof: Vec<u8> = vec![0,1,2,3];
+		// Whitelist controller account from further DB operations.
+		let v_controller_key = frame_system::Account::<T>::hashed_key_for(&v_controller);
+		frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into());
+	}: _(RawOrigin::Signed(v_controller), keys, proof)
+
+	purge_keys {
+		let n = MaxNominationsOf::<T>::get();
+		let (v_stash, _) = create_validator_with_nominators::<T>(
+			n,
+			MaxNominationsOf::<T>::get(),
+			false,
+			true,
+			RewardDestination::Staked,
+		)?;
+		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
+		let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap();
+		let proof: Vec<u8> = vec![0,1,2,3];
+		Session::<T>::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?;
+		// Whitelist controller account from further DB operations.
+		let v_controller_key = frame_system::Account::<T>::hashed_key_for(&v_controller);
+		frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into());
+	}: _(RawOrigin::Signed(v_controller))
+
+	#[extra]
+	check_membership_proof_current_session {
+		let n in 2 .. MAX_VALIDATORS as u32;
+
+		let (key, key_owner_proof1) = check_membership_proof_setup::<T>(n);
+		let key_owner_proof2 = key_owner_proof1.clone();
+	}: {
+		Historical::<T>::check_proof(key, key_owner_proof1);
+	}
+	verify {
+		assert!(Historical::<T>::check_proof(key, key_owner_proof2).is_some());
+	}
+
+	#[extra]
+	check_membership_proof_historical_session {
+		let n in 2 .. MAX_VALIDATORS as u32;
+
+		let (key, key_owner_proof1) = check_membership_proof_setup::<T>(n);
+
+		// skip to the next session so that the session is historical
+		// and the membership merkle proof must be checked.
+		Session::<T>::rotate_session();
+
+		let key_owner_proof2 = key_owner_proof1.clone();
+	}: {
+		Historical::<T>::check_proof(key, key_owner_proof1);
+	}
+	verify {
+		assert!(Historical::<T>::check_proof(key, key_owner_proof2).is_some());
+	}
+
+	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false);
+}
+
+/// Sets up the benchmark for checking a membership proof. It creates the given
+/// number of validators, sets random session keys and then creates a membership
+/// proof for the first authority and returns its key and the proof.
+fn check_membership_proof_setup<T: Config>(
+	n: u32,
+) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) {
+	pallet_staking::ValidatorCount::<T>::put(n);
+
+	// create validators and set random session keys
+	for (n, who) in create_validators::<T>(n, 1000).unwrap().into_iter().enumerate() {
+		use rand::{RngCore, SeedableRng};
+
+		let validator = T::Lookup::lookup(who).unwrap();
+		let controller = pallet_staking::Pallet::<T>::bonded(&validator).unwrap();
+
+		let keys = {
+			let mut keys = [0u8; 128];
+
+			// we keep the keys for the first validator as 0x00000...
+			if n > 0 {
+				let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64);
+				rng.fill_bytes(&mut keys);
+			}
+
+			keys
+		};
+
+		let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap();
+		let proof: Vec<u8> = vec![];
+
+		Session::<T>::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap();
+	}
+
+	Pallet::<T>::on_initialize(frame_system::pallet_prelude::BlockNumberFor::<T>::one());
+
+	// skip sessions until the new validator set is enacted
+	while Session::<T>::validators().len() < n as usize {
+		Session::<T>::rotate_session();
+	}
+
+	let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]);
+
+	(key, Historical::<T>::prove(key).unwrap())
+}
diff --git a/substrate/frame/session/benchmarking/src/lib.rs b/substrate/frame/session/benchmarking/src/lib.rs
index 84258d84994f..b08955a13329 100644
--- a/substrate/frame/session/benchmarking/src/lib.rs
+++ b/substrate/frame/session/benchmarking/src/lib.rs
@@ -15,153 +15,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Benchmarks for the Session Pallet.
-// This is separated into its own crate due to cyclic dependency issues.
+//! Benchmarks for the Session Pallet.
 
-#![cfg(feature = "runtime-benchmarks")]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-mod mock;
+#[cfg(feature = "runtime-benchmarks")]
+pub mod inner;
 
-use sp_runtime::traits::{One, StaticLookup, TrailingZeroInput};
-use sp_std::{prelude::*, vec};
+#[cfg(feature = "runtime-benchmarks")]
+pub use inner::*;
 
-use codec::Decode;
-use frame_benchmarking::v1::benchmarks;
-use frame_support::traits::{Get, KeyOwnerProofSystem, OnInitialize};
-use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
-use pallet_session::{historical::Pallet as Historical, Pallet as Session, *};
-use pallet_staking::{
-	benchmarking::create_validator_with_nominators, testing_utils::create_validators,
-	MaxNominationsOf, RewardDestination,
-};
-
-const MAX_VALIDATORS: u32 = 1000;
-
-pub struct Pallet<T: Config>(pallet_session::Pallet<T>);
-pub trait Config:
-	pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config
-{
-}
-
-impl<T: Config> OnInitialize<BlockNumberFor<T>> for Pallet<T> {
-	fn on_initialize(n: BlockNumberFor<T>) -> frame_support::weights::Weight {
-		pallet_session::Pallet::<T>::on_initialize(n)
-	}
-}
-
-benchmarks! {
-	set_keys {
-		let n = MaxNominationsOf::<T>::get();
-		let (v_stash, _) = create_validator_with_nominators::<T>(
-			n,
-			MaxNominationsOf::<T>::get(),
-			false,
-			true,
-			RewardDestination::Staked,
-		)?;
-		let v_controller = pallet_staking::Pallet::<T>::bonded(&v_stash).ok_or("not stash")?;
-
-		let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap();
-		let proof: Vec<u8> = vec![0,1,2,3];
-		// Whitelist controller account from further DB operations.
- let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); - frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller), keys, proof) - - purge_keys { - let n = MaxNominationsOf::::get(); - let (v_stash, _) = create_validator_with_nominators::( - n, - MaxNominationsOf::::get(), - false, - true, - RewardDestination::Staked, - )?; - let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; - let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - let proof: Vec = vec![0,1,2,3]; - Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; - // Whitelist controller account from further DB operations. - let v_controller_key = frame_system::Account::::hashed_key_for(&v_controller); - frame_benchmarking::benchmarking::add_to_whitelist(v_controller_key.into()); - }: _(RawOrigin::Signed(v_controller)) - - #[extra] - check_membership_proof_current_session { - let n in 2 .. MAX_VALIDATORS as u32; - - let (key, key_owner_proof1) = check_membership_proof_setup::(n); - let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { - assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); - } - - #[extra] - check_membership_proof_historical_session { - let n in 2 .. MAX_VALIDATORS as u32; - - let (key, key_owner_proof1) = check_membership_proof_setup::(n); - - // skip to the next session so that the session is historical - // and the membership merkle proof must be checked. - Session::::rotate_session(); - - let key_owner_proof2 = key_owner_proof1.clone(); - }: { - Historical::::check_proof(key, key_owner_proof1); - } - verify { - assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); -} - -/// Sets up the benchmark for checking a membership proof. It creates the given -/// number of validators, sets random session keys and then creates a membership -/// proof for the first authority and returns its key and the proof. -fn check_membership_proof_setup( - n: u32, -) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) { - pallet_staking::ValidatorCount::::put(n); - - // create validators and set random session keys - for (n, who) in create_validators::(n, 1000).unwrap().into_iter().enumerate() { - use rand::{RngCore, SeedableRng}; - - let validator = T::Lookup::lookup(who).unwrap(); - let controller = pallet_staking::Pallet::::bonded(&validator).unwrap(); - - let keys = { - let mut keys = [0u8; 128]; - - // we keep the keys for the first validator as 0x00000... 
- if n > 0 { - let mut rng = rand::rngs::StdRng::seed_from_u64(n as u64); - rng.fill_bytes(&mut keys); - } - - keys - }; - - let keys: T::Keys = Decode::decode(&mut &keys[..]).unwrap(); - let proof: Vec = vec![]; - - Session::::set_keys(RawOrigin::Signed(controller).into(), keys, proof).unwrap(); - } - - Pallet::::on_initialize(frame_system::pallet_prelude::BlockNumberFor::::one()); - - // skip sessions until the new validator set is enacted - while Session::::validators().len() < n as usize { - Session::::rotate_session(); - } - - let key = (sp_runtime::KeyTypeId(*b"babe"), &[0u8; 32]); - - (key, Historical::::prove(key).unwrap()) -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index f93f4d31e777..90c446808daf 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -34,9 +34,9 @@ //! //! See [`polkadot_sdk::frame`](../polkadot_sdk_docs/polkadot_sdk/frame_runtime/index.html). //! -//! ## Warning: Experimental +//! ## WARNING: Experimental //! -//! This crate and all of its content is experimental, and should not yet be used in production. +//! **This crate and all of its content is experimental, and should not yet be used in production.** //! //! ## Underlying dependencies //! diff --git a/substrate/frame/system/benchmarking/src/inner.rs b/substrate/frame/system/benchmarking/src/inner.rs new file mode 100644 index 000000000000..c1631b0a2e33 --- /dev/null +++ b/substrate/frame/system/benchmarking/src/inner.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Frame System benchmarks. + +use codec::Encode; +use frame_benchmarking::v2::*; +use frame_support::{dispatch::DispatchClass, storage, traits::Get}; +use frame_system::{Call, Pallet as System, RawOrigin}; +use sp_core::storage::well_known_keys; +use sp_runtime::traits::Hash; +use sp_std::{prelude::*, vec}; + +pub struct Pallet(System); +pub trait Config: frame_system::Config { + /// Adds ability to the Runtime to test against their sample code. + /// + /// Default is `../res/kitchensink_runtime.compact.compressed.wasm`. + fn prepare_set_code_data() -> Vec { + include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec() + } + + /// Adds ability to the Runtime to prepare/initialize before running benchmark `set_code`. + fn setup_set_code_requirements(_code: &Vec) -> Result<(), BenchmarkError> { + Ok(()) + } + + /// Adds ability to the Runtime to do custom validation after benchmark. + /// + /// Default is checking for `CodeUpdated` event . 
+	fn verify_set_code() {
+		System::<Self>::assert_last_event(frame_system::Event::<Self>::CodeUpdated.into());
+	}
+}
+
+#[benchmarks]
+mod benchmarks {
+	use super::*;
+
+	#[benchmark]
+	fn remark(
+		b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>,
+	) -> Result<(), BenchmarkError> {
+		let remark_message = vec![1; b as usize];
+		let caller = whitelisted_caller();
+
+		#[extrinsic_call]
+		remark(RawOrigin::Signed(caller), remark_message);
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn remark_with_event(
+		b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>,
+	) -> Result<(), BenchmarkError> {
+		let remark_message = vec![1; b as usize];
+		let caller: T::AccountId = whitelisted_caller();
+		let hash = T::Hashing::hash(&remark_message[..]);
+
+		#[extrinsic_call]
+		remark_with_event(RawOrigin::Signed(caller.clone()), remark_message);
+
+		System::<T>::assert_last_event(
+			frame_system::Event::<T>::Remarked { sender: caller, hash }.into(),
+		);
+		Ok(())
+	}
+
+	#[benchmark]
+	fn set_heap_pages() -> Result<(), BenchmarkError> {
+		#[extrinsic_call]
+		set_heap_pages(RawOrigin::Root, Default::default());
+
+		Ok(())
+	}
+
+	#[benchmark]
+	fn set_code() -> Result<(), BenchmarkError> {
+		let runtime_blob = T::prepare_set_code_data();
+		T::setup_set_code_requirements(&runtime_blob)?;
+
+		#[extrinsic_call]
+		set_code(RawOrigin::Root, runtime_blob);
+
+		T::verify_set_code();
+		Ok(())
+	}
+
+	#[benchmark(extra)]
+	fn set_code_without_checks() -> Result<(), BenchmarkError> {
+		// Assume Wasm ~4MB
+		let code = vec![1; 4_000_000 as usize];
+		T::setup_set_code_requirements(&code)?;
+
+		#[block]
+		{
+			System::<T>::set_code_without_checks(RawOrigin::Root.into(), code)?;
+		}
+
+		let current_code =
+			storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?;
+		assert_eq!(current_code.len(), 4_000_000 as usize);
+		Ok(())
+	}
+
+	#[benchmark(skip_meta)]
+	fn set_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> {
+		// Set up i items to add
+		let mut items = Vec::new();
+		for j in 0..i {
+			let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec();
+			items.push((hash.clone(), hash.clone()));
+		}
+
+		let items_to_verify = items.clone();
+
+		#[extrinsic_call]
+		set_storage(RawOrigin::Root, items);
+
+		// Verify that they're actually in the storage.
+		for (item, _) in items_to_verify {
+			let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?;
+			assert_eq!(value, *item);
+		}
+		Ok(())
+	}
+
+	#[benchmark(skip_meta)]
+	fn kill_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> {
+		// Add i items to storage
+		let mut items = Vec::with_capacity(i as usize);
+		for j in 0..i {
+			let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec();
+			storage::unhashed::put_raw(&hash, &hash);
+			items.push(hash);
+		}
+
+		// Verify that they're actually in the storage.
+		for item in &items {
+			let value = storage::unhashed::get_raw(item).ok_or("No value stored")?;
+			assert_eq!(value, *item);
+		}
+
+		let items_to_verify = items.clone();
+
+		#[extrinsic_call]
+		kill_storage(RawOrigin::Root, items);
+
+		// Verify that they're not in the storage anymore.
+		for item in items_to_verify {
+			assert!(storage::unhashed::get_raw(&item).is_none());
+		}
+		Ok(())
+	}
+
+	#[benchmark(skip_meta)]
+	fn kill_prefix(p: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> {
+		let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec();
+		let mut items = Vec::with_capacity(p as usize);
+		// add p items that share a prefix
+		for i in 0..p {
+			let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec();
+			let key = [&prefix[..], &hash[..]].concat();
+			storage::unhashed::put_raw(&key, &key);
+			items.push(key);
+		}
+
+		// Verify that they're actually in the storage.
+		for item in &items {
+			let value = storage::unhashed::get_raw(item).ok_or("No value stored")?;
+			assert_eq!(value, *item);
+		}
+
+		#[extrinsic_call]
+		kill_prefix(RawOrigin::Root, prefix, p);
+
+		// Verify that they're not in the storage anymore.
+		for item in items {
+			assert!(storage::unhashed::get_raw(&item).is_none());
+		}
+		Ok(())
+	}
+
+	#[benchmark]
+	fn authorize_upgrade() -> Result<(), BenchmarkError> {
+		let runtime_blob = T::prepare_set_code_data();
+		T::setup_set_code_requirements(&runtime_blob)?;
+		let hash = T::Hashing::hash(&runtime_blob);
+
+		#[extrinsic_call]
+		authorize_upgrade(RawOrigin::Root, hash);
+
+		assert!(System::<T>::authorized_upgrade().is_some());
+		Ok(())
+	}
+
+	#[benchmark]
+	fn apply_authorized_upgrade() -> Result<(), BenchmarkError> {
+		let runtime_blob = T::prepare_set_code_data();
+		T::setup_set_code_requirements(&runtime_blob)?;
+		let hash = T::Hashing::hash(&runtime_blob);
+		// Will be heavier when it needs to do verification (i.e. don't use `...without_checks`).
+		System::<T>::authorize_upgrade(RawOrigin::Root.into(), hash)?;
+
+		#[extrinsic_call]
+		apply_authorized_upgrade(RawOrigin::Root, runtime_blob);
+
+		// Can't check for `CodeUpdated` in parachain upgrades. Just check that the authorization is
+		// gone.
+		assert!(System::<T>::authorized_upgrade().is_none());
+		Ok(())
+	}
+
+	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
+}
diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs
index 29100faa7514..e55038aeb955 100644
--- a/substrate/frame/system/benchmarking/src/lib.rs
+++ b/substrate/frame/system/benchmarking/src/lib.rs
@@ -15,221 +15,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Benchmarks for Utility Pallet
+//! Frame System benchmarks.
 
 #![cfg_attr(not(feature = "std"), no_std)]
-#![cfg(feature = "runtime-benchmarks")]
 
-use codec::Encode;
-use frame_benchmarking::v2::*;
-use frame_support::{dispatch::DispatchClass, storage, traits::Get};
-use frame_system::{Call, Pallet as System, RawOrigin};
-use sp_core::storage::well_known_keys;
-use sp_runtime::traits::Hash;
-use sp_std::{prelude::*, vec};
+#[cfg(feature = "runtime-benchmarks")]
+pub mod inner;
 
-mod mock;
+#[cfg(feature = "runtime-benchmarks")]
+pub use inner::*;
 
-pub struct Pallet<T: Config>(System<T>);
-pub trait Config: frame_system::Config {
-	/// Adds ability to the Runtime to test against their sample code.
-	///
-	/// Default is `../res/kitchensink_runtime.compact.compressed.wasm`.
-	fn prepare_set_code_data() -> Vec<u8> {
-		include_bytes!("../res/kitchensink_runtime.compact.compressed.wasm").to_vec()
-	}
-
-	/// Adds ability to the Runtime to prepare/initialize before running benchmark `set_code`.
- fn setup_set_code_requirements(_code: &Vec) -> Result<(), BenchmarkError> { - Ok(()) - } - - /// Adds ability to the Runtime to do custom validation after benchmark. - /// - /// Default is checking for `CodeUpdated` event . - fn verify_set_code() { - System::::assert_last_event(frame_system::Event::::CodeUpdated.into()); - } -} - -#[benchmarks] -mod benchmarks { - use super::*; - - #[benchmark] - fn remark( - b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, - ) -> Result<(), BenchmarkError> { - let remark_message = vec![1; b as usize]; - let caller = whitelisted_caller(); - - #[extrinsic_call] - remark(RawOrigin::Signed(caller), remark_message); - - Ok(()) - } - - #[benchmark] - fn remark_with_event( - b: Linear<0, { *T::BlockLength::get().max.get(DispatchClass::Normal) as u32 }>, - ) -> Result<(), BenchmarkError> { - let remark_message = vec![1; b as usize]; - let caller: T::AccountId = whitelisted_caller(); - let hash = T::Hashing::hash(&remark_message[..]); - - #[extrinsic_call] - remark_with_event(RawOrigin::Signed(caller.clone()), remark_message); - - System::::assert_last_event( - frame_system::Event::::Remarked { sender: caller, hash }.into(), - ); - Ok(()) - } - - #[benchmark] - fn set_heap_pages() -> Result<(), BenchmarkError> { - #[extrinsic_call] - set_heap_pages(RawOrigin::Root, Default::default()); - - Ok(()) - } - - #[benchmark] - fn set_code() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - - #[extrinsic_call] - set_code(RawOrigin::Root, runtime_blob); - - T::verify_set_code(); - Ok(()) - } - - #[benchmark(extra)] - fn set_code_without_checks() -> Result<(), BenchmarkError> { - // Assume Wasm ~4MB - let code = vec![1; 4_000_000 as usize]; - T::setup_set_code_requirements(&code)?; - - #[block] - { - System::::set_code_without_checks(RawOrigin::Root.into(), code)?; - } - - let current_code = - storage::unhashed::get_raw(well_known_keys::CODE).ok_or("Code not stored.")?; - assert_eq!(current_code.len(), 4_000_000 as usize); - Ok(()) - } - - #[benchmark(skip_meta)] - fn set_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - // Set up i items to add - let mut items = Vec::new(); - for j in 0..i { - let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); - items.push((hash.clone(), hash.clone())); - } - - let items_to_verify = items.clone(); - - #[extrinsic_call] - set_storage(RawOrigin::Root, items); - - // Verify that they're actually in the storage. - for (item, _) in items_to_verify { - let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - Ok(()) - } - - #[benchmark(skip_meta)] - fn kill_storage(i: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - // Add i items to storage - let mut items = Vec::with_capacity(i as usize); - for j in 0..i { - let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); - storage::unhashed::put_raw(&hash, &hash); - items.push(hash); - } - - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - - let items_to_verify = items.clone(); - - #[extrinsic_call] - kill_storage(RawOrigin::Root, items); - - // Verify that they're not in the storage anymore. 
- for item in items_to_verify { - assert!(storage::unhashed::get_raw(&item).is_none()); - } - Ok(()) - } - - #[benchmark(skip_meta)] - fn kill_prefix(p: Linear<0, { 1_000 }>) -> Result<(), BenchmarkError> { - let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); - let mut items = Vec::with_capacity(p as usize); - // add p items that share a prefix - for i in 0..p { - let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); - let key = [&prefix[..], &hash[..]].concat(); - storage::unhashed::put_raw(&key, &key); - items.push(key); - } - - // Verify that they're actually in the storage. - for item in &items { - let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; - assert_eq!(value, *item); - } - - #[extrinsic_call] - kill_prefix(RawOrigin::Root, prefix, p); - - // Verify that they're not in the storage anymore. - for item in items { - assert!(storage::unhashed::get_raw(&item).is_none()); - } - Ok(()) - } - - #[benchmark] - fn authorize_upgrade() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - let hash = T::Hashing::hash(&runtime_blob); - - #[extrinsic_call] - authorize_upgrade(RawOrigin::Root, hash); - - assert!(System::::authorized_upgrade().is_some()); - Ok(()) - } - - #[benchmark] - fn apply_authorized_upgrade() -> Result<(), BenchmarkError> { - let runtime_blob = T::prepare_set_code_data(); - T::setup_set_code_requirements(&runtime_blob)?; - let hash = T::Hashing::hash(&runtime_blob); - // Will be heavier when it needs to do verification (i.e. don't use `...without_checks`). - System::::authorize_upgrade(RawOrigin::Root.into(), hash)?; - - #[extrinsic_call] - apply_authorized_upgrade(RawOrigin::Root, runtime_blob); - - // Can't check for `CodeUpdated` in parachain upgrades. Just check that the authorization is - // gone. - assert!(System::::authorized_upgrade().is_none()); - Ok(()) - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); -} +#[cfg(all(feature = "runtime-benchmarks", test))] +pub(crate) mod mock; diff --git a/substrate/frame/try-runtime/src/inner.rs b/substrate/frame/try-runtime/src/inner.rs new file mode 100644 index 000000000000..591124e2ad99 --- /dev/null +++ b/substrate/frame/try-runtime/src/inner.rs @@ -0,0 +1,50 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Supporting types for try-runtime, testing and dry-running commands. + +pub use frame_support::traits::{TryStateSelect, UpgradeCheckSelect}; +use frame_support::weights::Weight; + +sp_api::decl_runtime_apis! { + /// Runtime api for testing the execution of a runtime upgrade. + pub trait TryRuntime { + /// dry-run runtime upgrades, returning the total weight consumed. + /// + /// This should do EXACTLY the same operations as the runtime would have done in the case of + /// a runtime upgrade (e.g. 
pallet ordering must be the same) + /// + /// Returns the consumed weight of the migration in case of a successful one, combined with + /// the total allowed block weight of the runtime. + /// + /// If `checks` is `true`, `pre_migrate` and `post_migrate` of each migration and + /// `try_state` of all pallets will be executed. Else, no. If checks are executed, the PoV + /// tracking is likely inaccurate. + fn on_runtime_upgrade(checks: UpgradeCheckSelect) -> (Weight, Weight); + + /// Execute the given block, but optionally disable state-root and signature checks. + /// + /// Optionally, a number of `try_state` hooks can also be executed after the block + /// execution. + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + try_state: TryStateSelect, + ) -> Weight; + } +} diff --git a/substrate/frame/try-runtime/src/lib.rs b/substrate/frame/try-runtime/src/lib.rs index 43292efe2104..9da2dd18ca2b 100644 --- a/substrate/frame/try-runtime/src/lib.rs +++ b/substrate/frame/try-runtime/src/lib.rs @@ -18,36 +18,9 @@ //! Supporting types for try-runtime, testing and dry-running commands. #![cfg_attr(not(feature = "std"), no_std)] -#![cfg(feature = "try-runtime")] -pub use frame_support::traits::{TryStateSelect, UpgradeCheckSelect}; -use frame_support::weights::Weight; +#[cfg(feature = "try-runtime")] +pub mod inner; -sp_api::decl_runtime_apis! { - /// Runtime api for testing the execution of a runtime upgrade. - pub trait TryRuntime { - /// dry-run runtime upgrades, returning the total weight consumed. - /// - /// This should do EXACTLY the same operations as the runtime would have done in the case of - /// a runtime upgrade (e.g. pallet ordering must be the same) - /// - /// Returns the consumed weight of the migration in case of a successful one, combined with - /// the total allowed block weight of the runtime. - /// - /// If `checks` is `true`, `pre_migrate` and `post_migrate` of each migration and - /// `try_state` of all pallets will be executed. Else, no. If checks are executed, the PoV - /// tracking is likely inaccurate. - fn on_runtime_upgrade(checks: UpgradeCheckSelect) -> (Weight, Weight); - - /// Execute the given block, but optionally disable state-root and signature checks. - /// - /// Optionally, a number of `try_state` hooks can also be executed after the block - /// execution. 
- fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - try_state: TryStateSelect, - ) -> Weight; - } -} +#[cfg(feature = "try-runtime")] +pub use inner::*; diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index 2420f48b1f4a..799d474aebe4 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -26,7 +26,7 @@ sp-consensus-slots = { path = "../slots", default-features = false } sp-core = { path = "../../core", default-features = false } sp-inherents = { path = "../../inherents", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } -sp-timestamp = { path = "../../timestamp", optional = true } +sp-timestamp = { path = "../../timestamp", optional = true, default-features = false } [features] default = ["std"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 833b2af95cd1..8437497b02bd 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -37,7 +37,7 @@ ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false } sp-debug-derive = { path = "../debug-derive", default-features = false } sp-storage = { path = "../storage", default-features = false } -sp-externalities = { path = "../externalities", optional = true } +sp-externalities = { path = "../externalities", optional = true, default-features = false } futures = { version = "0.3.30", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { optional = true, workspace = true } diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index cdee4fb03e12..5314ccd6d965 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -20,9 +20,9 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } -sp-runtime = { path = "../runtime", optional = true } +sp-runtime = { path = "../runtime", optional = true, default-features = false } sp-staking = { path = "../staking", default-features = false } -sp-keystore = { path = "../keystore", optional = true } +sp-keystore = { path = "../keystore", optional = true, default-features = false } [features] default = ["std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 137a232fce73..6cce469d3f91 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -19,10 +19,10 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.79", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true } +sp-core = { path = "../core", optional = true, default-features = false } sp-inherents = { path = "../inherents", default-features = false } sp-runtime = { path = "../runtime", default-features = false } -sp-trie = { path = "../trie", optional = true } +sp-trie = { path = "../trie", optional = 
true, default-features = false } [features] default = ["std"] From aa78fe218014f653f883173a6e8d610964d56fbe Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 17 Apr 2024 18:43:56 +0200 Subject: [PATCH 04/74] Contracts: Refactor test builder (#4158) - Moved `substrate/frame/contracts/src/tests/builder.rs` into a pub test_utils module, so we can use that in the `pallet-contracts-mock-network` tests - Refactor xcm tests to use XCM builders, and simplify the use case for xcm-send --- .../frame/contracts/mock-network/src/lib.rs | 3 +- .../frame/contracts/mock-network/src/tests.rs | 168 +++++-------- substrate/frame/contracts/src/lib.rs | 1 + substrate/frame/contracts/src/test_utils.rs | 30 +++ .../frame/contracts/src/test_utils/builder.rs | 220 ++++++++++++++++++ substrate/frame/contracts/src/tests.rs | 43 +++- .../frame/contracts/src/tests/builder.rs | 219 ----------------- 7 files changed, 346 insertions(+), 338 deletions(-) create mode 100644 substrate/frame/contracts/src/test_utils.rs create mode 100644 substrate/frame/contracts/src/test_utils/builder.rs delete mode 100644 substrate/frame/contracts/src/tests/builder.rs diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs index 8a17a3f2fa78..20ded0f4a0b8 100644 --- a/substrate/frame/contracts/mock-network/src/lib.rs +++ b/substrate/frame/contracts/mock-network/src/lib.rs @@ -23,6 +23,7 @@ pub mod relay_chain; mod tests; use crate::primitives::{AccountId, UNITS}; +pub use pallet_contracts::test_utils::{ALICE, BOB}; use sp_runtime::BuildStorage; use xcm::latest::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -31,8 +32,6 @@ use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chai // Accounts pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); -pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); -pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]); // Balances pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS; diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index 39aa9bebc0f5..5632f75e7873 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -22,45 +22,31 @@ use crate::{ relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, }; use codec::{Decode, Encode}; -use frame_support::{ - pallet_prelude::Weight, - traits::{fungibles::Mutate, Currency}, -}; -use pallet_balances::{BalanceLock, Reasons}; -use pallet_contracts::{Code, CollectEvents, DebugInfo, Determinism}; +use frame_support::traits::{fungibles::Mutate, Currency}; +use pallet_contracts::{test_utils::builder::*, Code}; use pallet_contracts_fixtures::compile_module; use pallet_contracts_uapi::ReturnErrorCode; use xcm::{v4::prelude::*, VersionedLocation, VersionedXcm}; use xcm_simulator::TestExt; -type ParachainContracts = pallet_contracts::Pallet; - macro_rules! assert_return_code { ( $x:expr , $y:expr $(,)? ) => {{ assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); }}; } +fn bare_call(dest: sp_runtime::AccountId32) -> BareCallBuilder { + BareCallBuilder::::bare_call(ALICE, dest) +} + /// Instantiate the tests contract, and fund it with some balance and assets. fn instantiate_test_contract(name: &str) -> AccountId { let (wasm, _) = compile_module::(name).unwrap(); // Instantiate contract. 
let contract_addr = ParaA::execute_with(|| { - ParachainContracts::bare_instantiate( - ALICE, - 0, - Weight::MAX, - None, - Code::Upload(wasm), - vec![], - vec![], - DebugInfo::UnsafeDebug, - CollectEvents::Skip, - ) - .result - .unwrap() - .account_id + BareInstantiateBuilder::::bare_instantiate(ALICE, Code::Upload(wasm)) + .build_and_unwrap_account_id() }); // Funds contract account with some balance and assets. @@ -85,27 +71,18 @@ fn test_xcm_execute() { // Execute XCM instructions through the contract. ParaA::execute_with(|| { let amount: u128 = 10 * CENTS; + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: BOB.clone().into() }; // The XCM used to transfer funds to Bob. - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset(vec![(Here, amount).into()].into()), - DepositAsset { - assets: All.into(), - beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), - }, - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(assets.clone()) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::Success); @@ -127,29 +104,22 @@ fn test_xcm_execute_incomplete() { // Execute XCM instructions through the contract. ParaA::execute_with(|| { + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: BOB.clone().into() }; + // The XCM used to transfer funds to Bob. - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset(vec![(Here, amount).into()].into()), + let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(assets.clone()) // This will fail as the contract does not have enough balance to complete both // withdrawals. - WithdrawAsset(vec![(Here, INITIAL_BALANCE).into()].into()), - DepositAsset { - assets: All.into(), - beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), - }, - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + .withdraw_asset((Here, INITIAL_BALANCE)) + .buy_execution(assets.clone(), Unlimited) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build(); assert_eq!(result.gas_consumed, result.gas_required); assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); @@ -175,28 +145,16 @@ fn test_xcm_execute_reentrant_call() { }); // The XCM used to transfer funds to Bob. 
- let message: Xcm = Xcm(vec![ - Transact { - origin_kind: OriginKind::Native, - require_weight_at_most: 1_000_000_000.into(), - call: transact_call.encode().into(), - }, - ExpectTransactStatus(MaybeErrorCode::Success), - ]); - - let result = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - VersionedXcm::V4(message).encode().encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let message: Xcm = Xcm::builder_unsafe() + .transact(OriginKind::Native, 1_000_000_000, transact_call.encode()) + .expect_transact_status(MaybeErrorCode::Success) + .build(); - assert_return_code!(&result.result.unwrap(), ReturnErrorCode::XcmExecutionFailed); + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode().encode()) + .build_and_unwrap_result(); + + assert_return_code!(&result, ReturnErrorCode::XcmExecutionFailed); // Funds should not change hands as the XCM transact failed. assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); @@ -207,40 +165,36 @@ fn test_xcm_execute_reentrant_call() { fn test_xcm_send() { MockNet::reset(); let contract_addr = instantiate_test_contract("xcm_send"); + let amount = 1_000 * CENTS; let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` - // Send XCM instructions through the contract, to lock some funds on the relay chain. + // Send XCM instructions through the contract, to transfer some funds from the contract + // derivative account to Alice on the relay chain. ParaA::execute_with(|| { - let dest = Location::from(Parent); - let dest = VersionedLocation::V4(dest); - - let message: Xcm<()> = Xcm(vec![ - WithdrawAsset((Here, fee).into()), - BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, - LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, - ]); - let message = VersionedXcm::V4(message); - let exec = ParachainContracts::bare_call( - ALICE, - contract_addr.clone(), - 0, - Weight::MAX, - None, - (dest, message.encode()).encode(), - DebugInfo::UnsafeDebug, - CollectEvents::UnsafeCollect, - Determinism::Enforced, - ); + let dest = VersionedLocation::V4(Parent.into()); + let assets: Asset = (Here, amount).into(); + let beneficiary = AccountId32 { network: None, id: ALICE.clone().into() }; + + let message: Xcm<()> = Xcm::builder() + .withdraw_asset(assets.clone()) + .buy_execution((Here, fee), Unlimited) + .deposit_asset(assets, beneficiary) + .build(); + + let result = bare_call(contract_addr.clone()) + .data((dest, VersionedXcm::V4(message).encode()).encode()) + .build_and_unwrap_result(); - let mut data = &exec.result.unwrap().data[..]; + let mut data = &result.data[..]; XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); }); Relay::execute_with(|| { - // Check if the funds are locked on the relay chain. 
+		let derived_contract_addr = &parachain_account_sovereign_account_id(1, contract_addr);
 		assert_eq!(
-			relay_chain::Balances::locks(&parachain_account_sovereign_account_id(1, contract_addr)),
-			vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }]
+			INITIAL_BALANCE - amount,
+			relay_chain::Balances::free_balance(derived_contract_addr)
 		);
+		assert_eq!(INITIAL_BALANCE + amount - fee, relay_chain::Balances::free_balance(ALICE));
 	});
 }
diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs
index edc4c872bfce..b381fd2dc4f0 100644
--- a/substrate/frame/contracts/src/lib.rs
+++ b/substrate/frame/contracts/src/lib.rs
@@ -101,6 +101,7 @@ mod wasm;
 pub mod chain_extension;
 pub mod debug;
 pub mod migration;
+pub mod test_utils;
 pub mod weights;
 
 #[cfg(test)]
diff --git a/substrate/frame/contracts/src/test_utils.rs b/substrate/frame/contracts/src/test_utils.rs
new file mode 100644
index 000000000000..564b2d2e3bd2
--- /dev/null
+++ b/substrate/frame/contracts/src/test_utils.rs
@@ -0,0 +1,30 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Shared utilities for testing contracts.
+//! This is not part of the tests module because it is made public for other crates to use.
+#![cfg(feature = "std")]
+use frame_support::weights::Weight;
+pub use sp_runtime::AccountId32;
+
+pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]);
+pub const BOB: AccountId32 = AccountId32::new([2u8; 32]);
+pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]);
+pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]);
+
+pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 1024);
+pub mod builder;
diff --git a/substrate/frame/contracts/src/test_utils/builder.rs b/substrate/frame/contracts/src/test_utils/builder.rs
new file mode 100644
index 000000000000..94540eca5b4b
--- /dev/null
+++ b/substrate/frame/contracts/src/test_utils/builder.rs
@@ -0,0 +1,220 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
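+
+//! Builder-style helpers for driving the contracts pallet API in tests.
+//!
+//! A minimal usage sketch (illustrative only; it assumes a test runtime `Test`, a `wasm`
+//! blob, and the `ALICE` account from `test_utils`):
+//! ```ignore
+//! // Instantiate a contract with default parameters and take its account id.
+//! let addr = BareInstantiateBuilder::<Test>::bare_instantiate(ALICE, Code::Upload(wasm))
+//! 	.build_and_unwrap_account_id();
+//! // Call it, overriding only the fields that matter for the test.
+//! let result = BareCallBuilder::<Test>::bare_call(ALICE, addr).data(vec![1, 2, 3]).build();
+//! ```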
+ +use super::GAS_LIMIT; +use crate::{ + AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, Config, + ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, + ExecReturnValue, InstantiateReturnValue, OriginFor, Pallet, Weight, +}; +use codec::{Encode, HasCompact}; +use core::fmt::Debug; +use frame_support::pallet_prelude::DispatchResultWithPostInfo; +use paste::paste; +use scale_info::TypeInfo; + +/// Helper macro to generate a builder for contract API calls. +macro_rules! builder { + // Entry point to generate a builder for the given method. + ( + $method:ident($($field:ident: $type:ty,)*) -> $result:ty; + $($extra:item)* + ) => { + paste!{ + builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result; $($extra)*); + } + }; + // Generate the builder struct and its methods. + ( + $name:ident, + $method:ident($($field:ident: $type:ty,)*) -> $result:ty; + $($extra:item)* + ) => { + #[doc = concat!("A builder to construct a ", stringify!($method), " call")] + pub struct $name { + $($field: $type,)* + } + + #[allow(dead_code)] + impl $name + where + as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, + { + $( + #[doc = concat!("Set the ", stringify!($field))] + pub fn $field(mut self, value: $type) -> Self { + self.$field = value; + self + } + )* + + #[doc = concat!("Build the ", stringify!($method), " call")] + pub fn build(self) -> $result { + Pallet::::$method( + $(self.$field,)* + ) + } + + $($extra)* + } + } +} + +builder!( + instantiate_with_code( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create an [`InstantiateWithCodeBuilder`] with default values. + pub fn instantiate_with_code(origin: OriginFor, code: Vec) -> Self { + Self { + origin: origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + } + } +); + +builder!( + instantiate( + origin: OriginFor, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create an [`InstantiateBuilder`] with default values. + pub fn instantiate(origin: OriginFor, code_hash: CodeHash) -> Self { + Self { + origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code_hash, + data: vec![], + salt: vec![], + } + } +); + +builder!( + bare_instantiate( + origin: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + code: Code>, + data: Vec, + salt: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + ) -> ContractInstantiateResult, BalanceOf, EventRecordOf>; + + /// Build the instantiate call and unwrap the result. + pub fn build_and_unwrap_result(self) -> InstantiateReturnValue> { + self.build().result.unwrap() + } + + /// Build the instantiate call and unwrap the account id. 
+ pub fn build_and_unwrap_account_id(self) -> AccountIdOf { + self.build().result.unwrap().account_id + } + + pub fn bare_instantiate(origin: AccountIdOf, code: Code>) -> Self { + Self { + origin, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + code, + data: vec![], + salt: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + } + } +); + +builder!( + call( + origin: OriginFor, + dest: AccountIdLookupOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + data: Vec, + ) -> DispatchResultWithPostInfo; + + /// Create a [`CallBuilder`] with default values. + pub fn call(origin: OriginFor, dest: AccountIdLookupOf) -> Self { + CallBuilder { + origin, + dest, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + } + } +); + +builder!( + bare_call( + origin: AccountIdOf, + dest: AccountIdOf, + value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option>, + data: Vec, + debug: DebugInfo, + collect_events: CollectEvents, + determinism: Determinism, + ) -> ContractExecResult, EventRecordOf>; + + /// Build the call and unwrap the result. + pub fn build_and_unwrap_result(self) -> ExecReturnValue { + self.build().result.unwrap() + } + + /// Create a [`BareCallBuilder`] with default values. + pub fn bare_call(origin: AccountIdOf, dest: AccountIdOf) -> Self { + Self { + origin, + dest, + value: 0u32.into(), + gas_limit: GAS_LIMIT, + storage_deposit_limit: None, + data: vec![], + debug: DebugInfo::Skip, + collect_events: CollectEvents::Skip, + determinism: Determinism::Enforced, + } + } +); diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 57b804a51e41..8fe845fcf0f8 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod builder; mod pallet_dummy; mod test_debug; @@ -98,7 +97,6 @@ macro_rules! 
assert_refcount { } pub mod test_utils { - use super::{Contracts, DepositPerByte, DepositPerItem, Hash, SysConfig, Test}; use crate::{ exec::AccountIdOf, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, @@ -166,6 +164,38 @@ pub mod test_utils { } } +mod builder { + use super::Test; + use crate::{ + test_utils::{builder::*, AccountId32, ALICE}, + tests::RuntimeOrigin, + AccountIdLookupOf, Code, CodeHash, + }; + + pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { + BareInstantiateBuilder::::bare_instantiate(ALICE, code) + } + + pub fn bare_call(dest: AccountId32) -> BareCallBuilder { + BareCallBuilder::::bare_call(ALICE, dest) + } + + pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { + InstantiateWithCodeBuilder::::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + code, + ) + } + + pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { + InstantiateBuilder::::instantiate(RuntimeOrigin::signed(ALICE), code_hash) + } + + pub fn call(dest: AccountIdLookupOf) -> CallBuilder { + CallBuilder::::call(RuntimeOrigin::signed(ALICE), dest) + } +} + impl Test { pub fn set_unstable_interface(unstable_interface: bool) { UNSTABLE_INTERFACE.with(|v| *v.borrow_mut() = unstable_interface); @@ -2439,14 +2469,7 @@ fn failed_deposit_charge_should_roll_back_call() { transfer_proxy_call, ); - >::call( - RuntimeOrigin::signed(ALICE), - addr_caller.clone(), - 0, - GAS_LIMIT, - None, - data.encode(), - ) + builder::call(addr_caller).data(data.encode()).build() }) }; diff --git a/substrate/frame/contracts/src/tests/builder.rs b/substrate/frame/contracts/src/tests/builder.rs deleted file mode 100644 index 08d12503a290..000000000000 --- a/substrate/frame/contracts/src/tests/builder.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{AccountId32, Test, ALICE, GAS_LIMIT}; -use crate::{ - tests::RuntimeOrigin, AccountIdLookupOf, AccountIdOf, BalanceOf, Code, CodeHash, CollectEvents, - ContractExecResult, ContractInstantiateResult, DebugInfo, Determinism, EventRecordOf, - ExecReturnValue, OriginFor, Pallet, Weight, -}; -use codec::Compact; -use frame_support::pallet_prelude::DispatchResultWithPostInfo; -use paste::paste; - -/// Helper macro to generate a builder for contract API calls. -macro_rules! builder { - // Entry point to generate a builder for the given method. - ( - $method:ident($($field:ident: $type:ty,)*) -> $result:ty - ) => { - paste!{ - builder!([< $method:camel Builder >], $method($($field: $type,)* ) -> $result); - } - }; - // Generate the builder struct and its methods. 
- ( - $name:ident, - $method:ident( - $($field:ident: $type:ty,)* - ) -> $result:ty - ) => { - #[doc = concat!("A builder to construct a ", stringify!($method), " call")] - pub struct $name { - $($field: $type,)* - } - - #[allow(dead_code)] - impl $name - { - $( - #[doc = concat!("Set the ", stringify!($field))] - pub fn $field(mut self, value: $type) -> Self { - self.$field = value; - self - } - )* - - #[doc = concat!("Build the ", stringify!($method), " call")] - pub fn build(self) -> $result { - Pallet::::$method( - $(self.$field,)* - ) - } - } - } -} - -builder!( - instantiate_with_code( - origin: OriginFor, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - code: Vec, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - instantiate( - origin: OriginFor, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - code_hash: CodeHash, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - bare_instantiate( - origin: AccountIdOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>, - code: Code>, - data: Vec, - salt: Vec, - debug: DebugInfo, - collect_events: CollectEvents, - ) -> ContractInstantiateResult, BalanceOf, EventRecordOf> -); - -builder!( - call( - origin: OriginFor, - dest: AccountIdLookupOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>>, - data: Vec, - ) -> DispatchResultWithPostInfo -); - -builder!( - bare_call( - origin: AccountIdOf, - dest: AccountIdOf, - value: BalanceOf, - gas_limit: Weight, - storage_deposit_limit: Option>, - data: Vec, - debug: DebugInfo, - collect_events: CollectEvents, - determinism: Determinism, - ) -> ContractExecResult, EventRecordOf> -); - -/// Create a [`BareInstantiateBuilder`] with default values. -pub fn bare_instantiate(code: Code>) -> BareInstantiateBuilder { - BareInstantiateBuilder { - origin: ALICE, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code, - data: vec![], - salt: vec![], - debug: DebugInfo::Skip, - collect_events: CollectEvents::Skip, - } -} - -impl BareInstantiateBuilder { - /// Build the instantiate call and unwrap the result. - pub fn build_and_unwrap_result(self) -> crate::InstantiateReturnValue> { - self.build().result.unwrap() - } - - /// Build the instantiate call and unwrap the account id. - pub fn build_and_unwrap_account_id(self) -> AccountIdOf { - self.build().result.unwrap().account_id - } -} - -/// Create a [`BareCallBuilder`] with default values. -pub fn bare_call(dest: AccountId32) -> BareCallBuilder { - BareCallBuilder { - origin: ALICE, - dest, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - data: vec![], - debug: DebugInfo::Skip, - collect_events: CollectEvents::Skip, - determinism: Determinism::Enforced, - } -} - -impl BareCallBuilder { - /// Build the call and unwrap the result. - pub fn build_and_unwrap_result(self) -> ExecReturnValue { - self.build().result.unwrap() - } -} - -/// Create an [`InstantiateWithCodeBuilder`] with default values. -pub fn instantiate_with_code(code: Vec) -> InstantiateWithCodeBuilder { - InstantiateWithCodeBuilder { - origin: RuntimeOrigin::signed(ALICE), - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code, - data: vec![], - salt: vec![], - } -} - -/// Create an [`InstantiateBuilder`] with default values. 
-pub fn instantiate(code_hash: CodeHash) -> InstantiateBuilder { - InstantiateBuilder { - origin: RuntimeOrigin::signed(ALICE), - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - code_hash, - data: vec![], - salt: vec![], - } -} - -/// Create a [`CallBuilder`] with default values. -pub fn call(dest: AccountIdLookupOf) -> CallBuilder { - CallBuilder { - origin: RuntimeOrigin::signed(ALICE), - dest, - value: 0, - gas_limit: GAS_LIMIT, - storage_deposit_limit: None, - data: vec![], - } -} From 305d311d5c732fcc4629f3295768f1ed44ef434c Mon Sep 17 00:00:00 2001 From: Muharem Date: Wed, 17 Apr 2024 18:45:01 +0200 Subject: [PATCH 05/74] Asset Conversion: Pool Touch Call (#3251) Introduce `touch` call designed to address operational prerequisites before providing liquidity to a pool. This function ensures that essential requirements, such as the presence of the pool's accounts, are fulfilled. It is particularly beneficial in scenarios where a pool creator removes the pool's accounts without providing liquidity. --------- Co-authored-by: command-bot <> --- .../src/weights/pallet_asset_conversion.rs | 22 +++ .../src/weights/pallet_asset_conversion.rs | 22 +++ prdoc/pr_3251.prdoc | 14 ++ .../asset-conversion/src/benchmarking.rs | 67 +++++++-- substrate/frame/asset-conversion/src/lib.rs | 65 ++++++++- .../frame/asset-conversion/src/weights.rs | 129 ++++++++++++------ substrate/frame/assets/src/lib.rs | 2 + 7 files changed, 268 insertions(+), 53 deletions(-) create mode 100644 prdoc/pr_3251.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs index 0486932d1d6e..ec5a4084361f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_asset_conversion.rs @@ -154,4 +154,26 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 381_000_000 picoseconds. 
+ Weight::from_parts(398_540_909, 6360) + // Standard Error: 1_330_283 + .saturating_add(Weight::from_parts(209_463_636, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs index 7a5aed3d7c69..1c5b9be8f8e6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_asset_conversion.rs @@ -153,4 +153,26 @@ impl pallet_asset_conversion::WeightInfo for WeightInfo .saturating_add(T::DbWeight::get().writes(4)) .saturating_add(Weight::from_parts(0, 393).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 381_000_000 picoseconds. + Weight::from_parts(398_540_909, 6360) + // Standard Error: 1_330_283 + .saturating_add(Weight::from_parts(209_463_636, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/prdoc/pr_3251.prdoc b/prdoc/pr_3251.prdoc new file mode 100644 index 000000000000..1f95c228f7a8 --- /dev/null +++ b/prdoc/pr_3251.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Asset Conversion: Pool Touch Call" + +doc: + - audience: Runtime Dev + description: | + Introduce `touch` call designed to address operational prerequisites before providing liquidity to a pool. + This function ensures that essential requirements, such as the presence of the pool's accounts, are fulfilled. + It is particularly beneficial in scenarios where a pool creator removes the pool's accounts without providing liquidity. 
+ +crates: + - name: pallet-asset-conversion diff --git a/substrate/frame/asset-conversion/src/benchmarking.rs b/substrate/frame/asset-conversion/src/benchmarking.rs index f0e02c802ad8..c5f68476b1d0 100644 --- a/substrate/frame/asset-conversion/src/benchmarking.rs +++ b/substrate/frame/asset-conversion/src/benchmarking.rs @@ -24,7 +24,7 @@ use frame_support::{ assert_ok, traits::{ fungible::NativeOrWithId, - fungibles::{Create, Inspect, Mutate}, + fungibles::{Create, Inspect, Mutate, Refund}, }, }; use frame_system::RawOrigin as SystemOrigin; @@ -75,12 +75,21 @@ where } /// Create the `asset` and mint the `amount` for the `caller`. -fn create_asset(caller: &T::AccountId, asset: &T::AssetKind, amount: T::Balance) -where +fn create_asset( + caller: &T::AccountId, + asset: &T::AssetKind, + amount: T::Balance, + is_sufficient: bool, +) where T::Assets: Create + Mutate, { if !T::Assets::asset_exists(asset.clone()) { - assert_ok!(T::Assets::create(asset.clone(), caller.clone(), true, T::Balance::one())); + assert_ok!(T::Assets::create( + asset.clone(), + caller.clone(), + is_sufficient, + T::Balance::one() + )); } assert_ok!(T::Assets::mint_into( asset.clone(), @@ -141,8 +150,8 @@ where T::Assets::minimum_balance(asset1.clone()), T::Assets::minimum_balance(asset2.clone()), ); - create_asset::(caller, asset1, liquidity1); - create_asset::(caller, asset2, liquidity2); + create_asset::(caller, asset1, liquidity1, true); + create_asset::(caller, asset2, liquidity2, true); let lp_token = AssetConversion::::get_next_pool_asset_id(); mint_setup_fee_asset::(caller, asset1, asset2, &lp_token); @@ -172,8 +181,8 @@ mod benchmarks { fn create_pool() { let caller: T::AccountId = whitelisted_caller(); let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); - create_asset::(&caller, &asset1, T::Assets::minimum_balance(asset1.clone())); - create_asset::(&caller, &asset2, T::Assets::minimum_balance(asset2.clone())); + create_asset::(&caller, &asset1, T::Assets::minimum_balance(asset1.clone()), true); + create_asset::(&caller, &asset2, T::Assets::minimum_balance(asset2.clone()), true); let lp_token = AssetConversion::::get_next_pool_asset_id(); create_fee_asset::(&caller); @@ -358,5 +367,47 @@ mod benchmarks { assert_eq!(actual_balance, init_caller_balance + T::Balance::one()); } + #[benchmark] + fn touch(n: Linear<0, 3>) { + let caller: T::AccountId = whitelisted_caller(); + let (asset1, asset2) = T::BenchmarkHelper::create_pair(0, 1); + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2).unwrap(); + let pool_account = T::PoolLocator::address(&pool_id).unwrap(); + + create_fee_asset::(&caller); + create_asset::(&caller, &asset1, ::Balance::one(), false); + create_asset::(&caller, &asset2, ::Balance::one(), false); + let lp_token = AssetConversion::::get_next_pool_asset_id(); + mint_setup_fee_asset::(&caller, &asset1, &asset2, &lp_token); + + assert_ok!(AssetConversion::::create_pool( + SystemOrigin::Signed(caller.clone()).into(), + Box::new(asset1.clone()), + Box::new(asset2.clone()) + )); + + if n > 0 && + ::Assets::deposit_held(asset1.clone(), pool_account.clone()).is_some() + { + let _ = ::Assets::refund(asset1.clone(), pool_account.clone()); + } + if n > 1 && + ::Assets::deposit_held(asset2.clone(), pool_account.clone()).is_some() + { + let _ = ::Assets::refund(asset2.clone(), pool_account.clone()); + } + if n > 2 && + ::PoolAssets::deposit_held(lp_token.clone(), pool_account.clone()) + .is_some() + { + let _ = ::PoolAssets::refund(lp_token, pool_account); + } + + #[extrinsic_call] + 
_(SystemOrigin::Signed(caller.clone()), Box::new(asset1.clone()), Box::new(asset2.clone()));
+
+		assert_last_event::<T>(Event::Touched { pool_id, who: caller }.into());
+	}
+
 	impl_benchmark_test_suite!(AssetConversion, crate::mock::new_test_ext(), crate::mock::Test);
 }
diff --git a/substrate/frame/asset-conversion/src/lib.rs b/substrate/frame/asset-conversion/src/lib.rs
index 0bf73e8809cf..bb6e70a7fe93 100644
--- a/substrate/frame/asset-conversion/src/lib.rs
+++ b/substrate/frame/asset-conversion/src/lib.rs
@@ -98,7 +98,10 @@ use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec};
 #[frame_support::pallet]
 pub mod pallet {
 	use super::*;
-	use frame_support::pallet_prelude::*;
+	use frame_support::{
+		pallet_prelude::{DispatchResult, *},
+		traits::fungibles::Refund,
+	};
 	use frame_system::pallet_prelude::*;
 	use sp_arithmetic::{traits::Unsigned, Permill};
@@ -130,7 +133,8 @@ pub mod pallet {
 		type Assets: Inspect
 			+ Mutate
 			+ AccountTouch
-			+ Balanced;
+			+ Balanced
+			+ Refund;
 
 		/// Liquidity pool identifier.
 		type PoolId: Parameter + MaxEncodedLen + Ord;
@@ -149,7 +153,8 @@ pub mod pallet {
 		type PoolAssets: Inspect
 			+ Create
 			+ Mutate
-			+ AccountTouch;
+			+ AccountTouch
+			+ Refund;
 
 		/// A % the liquidity providers will take of every swap. Represents 10ths of a percent.
 		#[pallet::constant]
@@ -281,6 +286,13 @@ pub mod pallet {
 			/// E.g. (A, amount_in) -> (Dot, amount_out) -> (B, amount_out)
 			path: BalancePath<T>,
 		},
+		/// Pool has been touched in order to fulfill operational requirements.
+		Touched {
+			/// The ID of the pool.
+			pool_id: T::PoolId,
+			/// The account initiating the touch.
+			who: T::AccountId,
+		},
 	}
 
 	#[pallet::error]
@@ -391,7 +403,9 @@ pub mod pallet {
 			NextPoolAssetId::<T>::set(Some(next_lp_token_id));
 
 			T::PoolAssets::create(lp_token.clone(), pool_account.clone(), false, 1u32.into())?;
-			T::PoolAssets::touch(lp_token.clone(), &pool_account, &sender)?;
+			if T::PoolAssets::should_touch(lp_token.clone(), &pool_account) {
+				T::PoolAssets::touch(lp_token.clone(), &pool_account, &sender)?
+			};
 
 			let pool_info = PoolInfo { lp_token: lp_token.clone() };
 			Pools::<T>::insert(pool_id.clone(), pool_info);
@@ -656,6 +670,49 @@ pub mod pallet {
 			)?;
 			Ok(())
 		}
+
+		/// Touch an existing pool to fulfill prerequisites before providing liquidity, such as
+		/// ensuring that the pool's accounts are in place. It is typically useful when a pool
+		/// creator removes the pool's accounts and does not provide liquidity. This action may
+		/// involve holding assets from the caller as a deposit for creating the pool's accounts.
+		///
+		/// The origin must be Signed.
+		///
+		/// - `asset1`: The asset ID of an existing pool with a pair (asset1, asset2).
+		/// - `asset2`: The asset ID of an existing pool with a pair (asset1, asset2).
+		///
+		/// Emits `Touched` event when successful.
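+		///
+		/// A minimal usage sketch (illustrative only, not part of the pallet; it assumes a
+		/// runtime where this pallet is named `AssetConversion`, `asset1`/`asset2` identify an
+		/// existing pool, and `caller` can pay the account deposits):
+		/// ```ignore
+		/// // Re-create any missing pool accounts, taking the required deposits from `caller`.
+		/// AssetConversion::touch(
+		/// 	RuntimeOrigin::signed(caller),
+		/// 	Box::new(asset1),
+		/// 	Box::new(asset2),
+		/// )?;
+		/// ```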
+ #[pallet::call_index(5)] + #[pallet::weight(T::WeightInfo::touch(3))] + pub fn touch( + origin: OriginFor, + asset1: Box, + asset2: Box, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let pool_id = T::PoolLocator::pool_id(&asset1, &asset2) + .map_err(|_| Error::::InvalidAssetPair)?; + let pool = Pools::::get(&pool_id).ok_or(Error::::PoolNotFound)?; + let pool_account = + T::PoolLocator::address(&pool_id).map_err(|_| Error::::InvalidAssetPair)?; + + let mut refunds_number: u32 = 0; + if T::Assets::should_touch(*asset1.clone(), &pool_account) { + T::Assets::touch(*asset1, &pool_account, &who)?; + refunds_number += 1; + } + if T::Assets::should_touch(*asset2.clone(), &pool_account) { + T::Assets::touch(*asset2, &pool_account, &who)?; + refunds_number += 1; + } + if T::PoolAssets::should_touch(pool.lp_token.clone(), &pool_account) { + T::PoolAssets::touch(pool.lp_token, &pool_account, &who)?; + refunds_number += 1; + } + Self::deposit_event(Event::Touched { pool_id, who }); + Ok(Some(T::WeightInfo::touch(refunds_number)).into()) + } } impl Pallet { diff --git a/substrate/frame/asset-conversion/src/weights.rs b/substrate/frame/asset-conversion/src/weights.rs index 212f56073f94..9aea19dbf57c 100644 --- a/substrate/frame/asset-conversion/src/weights.rs +++ b/substrate/frame/asset-conversion/src/weights.rs @@ -18,27 +18,25 @@ //! Autogenerated weights for `pallet_asset_conversion` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-04-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-03-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-anb7yjbi-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-p5qp1txx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_asset_conversion -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --output=./substrate/frame/asset-conversion/src/weights.rs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_asset_conversion +// --chain=dev // --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/asset-conversion/src/weights.rs // --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -56,6 +54,7 @@ pub trait WeightInfo { fn remove_liquidity() -> Weight; fn swap_exact_tokens_for_tokens(n: u32, ) -> Weight; fn swap_tokens_for_exact_tokens(n: u32, ) -> Weight; + fn touch(n: u32, ) -> Weight; } /// Weights for `pallet_asset_conversion` using the Substrate node and recommended hardware. 
@@ -63,7 +62,7 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -77,9 +76,9 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `910` // Estimated: `6360` - // Minimum execution time: 86_709_000 picoseconds. - Weight::from_parts(88_841_000, 6360) - .saturating_add(T::DbWeight::get().reads(7_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -98,8 +97,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 148_672_000 picoseconds. - Weight::from_parts(151_824_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(10_u64)) } @@ -117,8 +116,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_743_000 picoseconds. - Weight::from_parts(132_793_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. + Weight::from_parts(134_350_000, 11426) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(8_u64)) } @@ -131,10 +130,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 81_173_000 picoseconds. - Weight::from_parts(82_574_000, 990) - // Standard Error: 335_929 - .saturating_add(Weight::from_parts(11_607_291, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -148,21 +147,45 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 80_562_000 picoseconds. - Weight::from_parts(82_501_000, 990) - // Standard Error: 329_460 - .saturating_add(Weight::from_parts(11_295_339, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. 
+ Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 45_757_000 picoseconds. + Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(8_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } // For backwards compatibility and tests. impl WeightInfo for () { /// Storage: `AssetConversion::Pools` (r:1 w:1) /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) + /// Storage: `System::Account` (r:2 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Assets::Asset` (r:2 w:0) /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) @@ -176,9 +199,9 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `910` // Estimated: `6360` - // Minimum execution time: 86_709_000 picoseconds. - Weight::from_parts(88_841_000, 6360) - .saturating_add(RocksDbWeight::get().reads(7_u64)) + // Minimum execution time: 95_080_000 picoseconds. + Weight::from_parts(97_241_000, 6360) + .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `AssetConversion::Pools` (r:1 w:0) @@ -197,8 +220,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1507` // Estimated: `11426` - // Minimum execution time: 148_672_000 picoseconds. - Weight::from_parts(151_824_000, 11426) + // Minimum execution time: 147_652_000 picoseconds. + Weight::from_parts(153_331_000, 11426) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(10_u64)) } @@ -216,8 +239,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `11426` - // Minimum execution time: 130_743_000 picoseconds. 
- Weight::from_parts(132_793_000, 11426) + // Minimum execution time: 130_738_000 picoseconds. + Weight::from_parts(134_350_000, 11426) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(8_u64)) } @@ -230,10 +253,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 81_173_000 picoseconds. - Weight::from_parts(82_574_000, 990) - // Standard Error: 335_929 - .saturating_add(Weight::from_parts(11_607_291, 0).saturating_mul(n.into())) + // Minimum execution time: 79_681_000 picoseconds. + Weight::from_parts(81_461_000, 990) + // Standard Error: 320_959 + .saturating_add(Weight::from_parts(11_223_703, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) @@ -247,12 +270,36 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `89 + n * (419 ±0)` // Estimated: `990 + n * (5218 ±0)` - // Minimum execution time: 80_562_000 picoseconds. - Weight::from_parts(82_501_000, 990) - // Standard Error: 329_460 - .saturating_add(Weight::from_parts(11_295_339, 0).saturating_mul(n.into())) + // Minimum execution time: 78_988_000 picoseconds. + Weight::from_parts(81_025_000, 990) + // Standard Error: 320_021 + .saturating_add(Weight::from_parts(11_040_712, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(n.into()))) .saturating_add(Weight::from_parts(0, 5218).saturating_mul(n.into())) } + /// Storage: `AssetConversion::Pools` (r:1 w:0) + /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Assets::Asset` (r:2 w:2) + /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Assets::Account` (r:2 w:2) + /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Asset` (r:1 w:1) + /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) + /// Storage: `PoolAssets::Account` (r:1 w:1) + /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 3]`. + fn touch(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `1571` + // Estimated: `6360` + // Minimum execution time: 45_757_000 picoseconds. 
+ Weight::from_parts(48_502_032, 6360) + // Standard Error: 62_850 + .saturating_add(Weight::from_parts(19_450_978, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(8_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(n.into()))) + } } diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 37073b280655..9056b1eefbdc 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -1697,7 +1697,9 @@ pub mod pallet { fn should_touch(asset: T::AssetId, who: &T::AccountId) -> bool { match Asset::::get(&asset) { + // refer to the [`Self::new_account`] function for more details. Some(info) if info.is_sufficient => false, + Some(_) if frame_system::Pallet::::can_accrue_consumers(who, 2) => false, Some(_) => !Account::::contains_key(asset, who), _ => true, } From d591b16f6b1dec88003323cdae0c3abe3b5c9cbe Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Thu, 18 Apr 2024 13:44:49 +0700 Subject: [PATCH 06/74] Remove NotConcrete error (#3867) # Description - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/3651 polkadot address: 19nSqFQorfF2HxD3oBzWM3oCh4SaCRKWt1yvmgaPYGCo71J --- polkadot/xcm/pallet-xcm/src/lib.rs | 9 ++++++--- polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs | 2 -- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index cf22b86cf82c..698ec6998b49 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -535,20 +535,24 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, - /// Invalid non-concrete asset. - InvalidAssetNotConcrete, /// Invalid asset, reserve chain could not be determined for it. + #[codec(index = 21)] InvalidAssetUnknownReserve, /// Invalid asset, do not support remote asset reserves with different fees reserves. + #[codec(index = 22)] InvalidAssetUnsupportedReserve, /// Too many assets with different reserve locations have been attempted for transfer. + #[codec(index = 23)] TooManyReserves, /// Local XCM execution incomplete. + #[codec(index = 24)] LocalExecutionIncomplete, /// Could not decode XCM. + #[codec(index = 25)] UnableToDecode, /// XCM encoded length is too large. /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. + #[codec(index = 26)] XcmTooLarge, } @@ -565,7 +569,6 @@ pub mod pallet { impl From for Error { fn from(e: AssetTransferError) -> Self { match e { - AssetTransferError::NotConcrete => Error::::InvalidAssetNotConcrete, AssetTransferError::UnknownReserve => Error::::InvalidAssetUnknownReserve, } } diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs index 6d72eaf680fd..22e4a3bd61a8 100644 --- a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -23,8 +23,6 @@ use xcm::prelude::*; /// Errors related to determining asset transfer support. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] pub enum Error { - /// Invalid non-concrete asset. - NotConcrete, /// Reserve chain could not be determined for assets. 
 	UnknownReserve,
 }

From b6fab8046e42283d14e9fa6beda32c878b3e801e Mon Sep 17 00:00:00 2001
From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
Date: Thu, 18 Apr 2024 09:40:45 +0200
Subject: [PATCH 07/74] [ci] Run `test-linux-stable-int` on self-hosted GitHub
 Runners (#4178)

This PR adds `test-linux-stable-int` and `quick-benchmarks` as GitHub Actions
jobs. They are copies of the `test-linux-stable-int` and `quick-benchmarks`
jobs from GitLab CI and are currently used to stress-test the self-hosted
GitHub runners. The GitLab versions of both jobs are still `Required`,
whereas this workflow is allowed to fail.

cc https://github.com/paritytech/infrastructure/issues/46
---
 .github/workflows/test-github-actions.yml | 43 +++++++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 .github/workflows/test-github-actions.yml

diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml
new file mode 100644
index 000000000000..a78420dfe5e7
--- /dev/null
+++ b/.github/workflows/test-github-actions.yml
@@ -0,0 +1,43 @@
+name: test-github-actions
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+  merge_group:
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  test-linux-stable-int:
+    runs-on: arc-runners-polkadot-sdk
+    timeout-minutes: 30
+    container:
+      image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109"
+    env:
+      RUSTFLAGS: "-C debug-assertions -D warnings"
+      RUST_BACKTRACE: 1
+      WASM_BUILD_NO_COLOR: 1
+      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+      # Ensure we run the UI tests.
+      RUN_UI_TESTS: 1
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored
+  quick-benchmarks:
+    runs-on: arc-runners-polkadot-sdk
+    timeout-minutes: 30
+    container:
+      image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109"
+    env:
+      RUSTFLAGS: "-C debug-assertions -D warnings"
+      RUST_BACKTRACE: "full"
+      WASM_BUILD_NO_COLOR: 1
+      WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings"
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: script
+        run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet

From 76719da221d33117aadf6b7b9cc74e4fbeb25b34 Mon Sep 17 00:00:00 2001
From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com>
Date: Thu, 18 Apr 2024 11:24:16 +0200
Subject: [PATCH 08/74] [ci] Update ci image with rust 1.77 and 2024-04-10
 (#4077)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

cc https://github.com/paritytech/ci_cd/issues/974

---------

Co-authored-by: command-bot <>
Co-authored-by: Bastian Köcher
---
 .github/workflows/fmt-check.yml               |   2 +-
 .github/workflows/test-github-actions.yml     |   4 +-
 .gitlab-ci.yml                                |   3 +-
 docs/contributor/container.md                 |   2 +-
 .../development_environment_advice.rs         |   4 +-
 polkadot/node/core/runtime-api/src/cache.rs   |  17 +-
 polkadot/node/core/runtime-api/src/lib.rs     |  19 +-
 .../network/approval-distribution/src/lib.rs  |   1 +
 .../src/validator_side/mod.rs                 |   1 +
 .../src/sender/send_task.rs                   |   9 +-
 .../runtime/parachains/src/inclusion/mod.rs   |  47 +-
 substrate/client/consensus/aura/src/lib.rs    |  12 +-
.../grandpa/src/communication/tests.rs | 10 +- substrate/frame/contracts/fixtures/build.rs | 6 +- .../tests/benchmark_ui/invalid_origin.stderr | 2 +- .../deprecated_where_block.stderr | 534 +++++++++--------- ...umber_of_pallets_exceeds_tuple_size.stderr | 23 +- .../pallet_error_too_large.stderr | 2 +- .../undefined_call_part.stderr | 2 +- .../undefined_validate_unsigned_part.stderr | 60 +- .../support/test/tests/derive_no_bound.rs | 1 + .../call_argument_invalid_bound.stderr | 2 +- .../call_argument_invalid_bound_2.stderr | 6 +- .../call_argument_invalid_bound_3.stderr | 2 +- ...ev_mode_without_arg_max_encoded_len.stderr | 2 +- .../pallet_ui/event_field_not_member.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 20 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 20 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- .../ui/impl_incorrect_method_signature.stderr | 8 +- ...reference_in_impl_runtime_apis_call.stderr | 12 +- 32 files changed, 413 insertions(+), 426 deletions(-) diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index efcf278c46e8..324c9bfff7a5 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -15,7 +15,7 @@ jobs: os: ["ubuntu-latest"] runs-on: ${{ matrix.os }} container: - image: docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109 + image: docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml index a78420dfe5e7..09cb4a25b9a3 100644 --- a/.github/workflows/test-github-actions.yml +++ b/.github/workflows/test-github-actions.yml @@ -13,7 +13,7 @@ jobs: runs-on: arc-runners-polkadot-sdk timeout-minutes: 30 container: - image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" env: RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: 1 @@ -30,7 +30,7 @@ jobs: runs-on: arc-runners-polkadot-sdk timeout-minutes: 30 container: - image: "docker.io/paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240109" + image: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" env: RUSTFLAGS: "-C debug-assertions -D warnings" RUST_BACKTRACE: "full" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5e57dd86f141..77d31d96ee10 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,8 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] + CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" diff --git a/docs/contributor/container.md b/docs/contributor/container.md index 9c542f411c81..ec51b8b9d7cc 100644 --- a/docs/contributor/container.md +++ b/docs/contributor/container.md @@ -24,7 +24,7 @@ The command below allows building a Linux binary without having to even install docker run --rm -it \ -w /polkadot-sdk \ -v $(pwd):/polkadot-sdk \ - paritytech/ci-unified:bullseye-1.75.0-2024-01-22-v20240222 \ + docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 \ cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain sudo chown -R 
$(id -u):$(id -g) target/ ``` diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index 21bbe78836c4..9ba95dfa0329 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -38,7 +38,7 @@ //! // Use nightly formatting. //! // See the polkadot-sdk CI job that checks formatting for the current version used in //! // polkadot-sdk. -//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-01-22"], +//! "rust-analyzer.rustfmt.extraArgs": ["+nightly-2024-04-10"], //! } //! ``` //! @@ -79,7 +79,7 @@ //! # Use nightly formatting. //! # See the polkadot-sdk CI job that checks formatting for the current version used in //! # polkadot-sdk. -//! extraArgs = { "+nightly-2024-01-22" }, +//! extraArgs = { "+nightly-2024-04-10" }, //! }, //! }, //! ``` diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 7cd1f7ce7281..05efbc533d02 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -24,8 +24,8 @@ use polkadot_primitives::{ CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that @@ -561,7 +561,7 @@ pub(crate) enum RequestResult { // The structure of each variant is (relay_parent, [params,]*, result) Authorities(Hash, Vec), Validators(Hash, Vec), - MinimumBackingVotes(Hash, SessionIndex, u32), + MinimumBackingVotes(SessionIndex, u32), ValidatorGroups(Hash, (Vec>, GroupRotationInfo)), AvailabilityCores(Hash, Vec), PersistedValidationData(Hash, ParaId, OccupiedCoreAssumption, Option), @@ -589,19 +589,16 @@ pub(crate) enum RequestResult { FetchOnChainVotes(Hash, Option), PvfsRequirePrecheck(Hash, Vec), // This is a request with side-effects and no result, hence (). - SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()), + #[allow(dead_code)] + SubmitPvfCheckStatement(()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), UnappliedSlashes(Hash, Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>), KeyOwnershipProof(Hash, ValidatorId, Option), // This is a request with side-effects. 
- SubmitReportDisputeLost( - Hash, - slashing::DisputeProof, - slashing::OpaqueKeyOwnershipProof, - Option<()>, - ), + #[allow(dead_code)] + SubmitReportDisputeLost(Option<()>), ApprovalVotingParams(Hash, SessionIndex, ApprovalVotingParams), DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index b7995aeeee76..c8b1d61e7be7 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -101,7 +101,7 @@ where self.requests_cache.cache_authorities(relay_parent, authorities), Validators(relay_parent, validators) => self.requests_cache.cache_validators(relay_parent, validators), - MinimumBackingVotes(_, session_index, minimum_backing_votes) => self + MinimumBackingVotes(session_index, minimum_backing_votes) => self .requests_cache .cache_minimum_backing_votes(session_index, minimum_backing_votes), ValidatorGroups(relay_parent, groups) => @@ -155,7 +155,7 @@ where self.requests_cache.cache_on_chain_votes(relay_parent, scraped), PvfsRequirePrecheck(relay_parent, pvfs) => self.requests_cache.cache_pvfs_require_precheck(relay_parent, pvfs), - SubmitPvfCheckStatement(_, _, _, ()) => {}, + SubmitPvfCheckStatement(()) => {}, ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), @@ -170,7 +170,7 @@ where .cache_key_ownership_proof((relay_parent, validator_id), key_ownership_proof), RequestResult::ApprovalVotingParams(_relay_parent, session_index, params) => self.requests_cache.cache_approval_voting_params(session_index, params), - SubmitReportDisputeLost(_, _, _, _) => {}, + SubmitReportDisputeLost(_) => {}, DisabledValidators(relay_parent, disabled_validators) => self.requests_cache.cache_disabled_validators(relay_parent, disabled_validators), ParaBackingState(relay_parent, para_id, constraints) => self @@ -370,7 +370,7 @@ where async fn poll_requests(&mut self) { // If there are no active requests, this future should be pending forever. 
if self.active_requests.len() == 0 { - return futures::pending!() + return futures::pending!(); } // If there are active requests, this will always resolve to `Some(_)` when a request is @@ -439,7 +439,7 @@ where }}; ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; - let version: u32 = $version; // enforce type for the version expression + let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await .unwrap_or_else(|e| { gum::warn!( @@ -570,7 +570,8 @@ where SubmitPvfCheckStatement, submit_pvf_check_statement(stmt, signature), ver = 2, - sender + sender, + result = () ) }, Request::PvfsRequirePrecheck(sender) => { @@ -606,13 +607,15 @@ where SubmitReportDisputeLost, submit_report_dispute_lost(dispute_proof, key_ownership_proof), ver = Request::SUBMIT_REPORT_DISPUTE_LOST_RUNTIME_REQUIREMENT, - sender + sender, + result = () ), Request::MinimumBackingVotes(index, sender) => query!( MinimumBackingVotes, minimum_backing_votes(index), ver = Request::MINIMUM_BACKING_VOTES_RUNTIME_REQUIREMENT, - sender + sender, + result = (index) ), Request::DisabledValidators(sender) => query!( DisabledValidators, diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index d360a18423e6..369d82b45b09 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -148,6 +148,7 @@ enum ApprovalEntryError { InvalidCandidateIndex, DuplicateApproval, UnknownAssignment, + #[allow(dead_code)] AssignmentsFollowedDifferentPaths(RequiredRouting, RequiredRouting), } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index d23279e87541..f7b07133bff4 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -953,6 +953,7 @@ enum AdvertisementError { /// parent. ProtocolMisuse, /// Advertisement is invalid. + #[allow(dead_code)] Invalid(InsertAdvertisementError), } diff --git a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs index 18c66066d162..54ccd10789d0 100644 --- a/polkadot/node/network/dispute-distribution/src/sender/send_task.rs +++ b/polkadot/node/network/dispute-distribution/src/sender/send_task.rs @@ -16,7 +16,7 @@ use std::collections::{HashMap, HashSet}; -use futures::{future::RemoteHandle, Future, FutureExt}; +use futures::{Future, FutureExt}; use polkadot_node_network_protocol::{ request_response::{ @@ -64,7 +64,7 @@ pub struct SendTask { /// Status of a particular vote/statement delivery to a particular validator. enum DeliveryStatus { /// Request is still in flight. - Pending(RemoteHandle<()>), + Pending, /// Succeeded - no need to send request to this peer anymore. 
 	Succeeded,
 }
 
@@ -297,9 +297,8 @@ async fn send_requests(
 			metrics.time_dispute_request(),
 		);
 
-		let (remote, remote_handle) = fut.remote_handle();
-		ctx.spawn("dispute-sender", remote.boxed()).map_err(FatalError::SpawnTask)?;
-		statuses.insert(receiver, DeliveryStatus::Pending(remote_handle));
+		ctx.spawn("dispute-sender", fut.boxed()).map_err(FatalError::SpawnTask)?;
+		statuses.insert(receiver, DeliveryStatus::Pending);
 	}
 
 	let msg = NetworkBridgeTxMessage::SendRequests(reqs, IfDisconnected::ImmediateError);
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs
index 903d01aa5c9c..76caf740ebca 100644
--- a/polkadot/runtime/parachains/src/inclusion/mod.rs
+++ b/polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -377,22 +377,45 @@ pub mod pallet {
 const LOG_TARGET: &str = "runtime::inclusion";
 
 /// The reason that a candidate's outputs were rejected for.
-#[derive(derive_more::From)]
 #[cfg_attr(feature = "std", derive(Debug))]
-enum AcceptanceCheckErr<BlockNumber> {
+enum AcceptanceCheckErr {
 	HeadDataTooLarge,
 	/// Code upgrades are not permitted at the current time.
 	PrematureCodeUpgrade,
 	/// The new runtime blob is too large.
 	NewCodeTooLarge,
 	/// The candidate violated this DMP acceptance criteria.
-	ProcessedDownwardMessages(dmp::ProcessedDownwardMessagesAcceptanceErr),
+	ProcessedDownwardMessages,
 	/// The candidate violated this UMP acceptance criteria.
-	UpwardMessages(UmpAcceptanceCheckErr),
+	UpwardMessages,
 	/// The candidate violated this HRMP watermark acceptance criteria.
-	HrmpWatermark(hrmp::HrmpWatermarkAcceptanceErr<BlockNumber>),
+	HrmpWatermark,
 	/// The candidate violated this outbound HRMP acceptance criteria.
-	OutboundHrmp(hrmp::OutboundHrmpAcceptanceErr),
+	OutboundHrmp,
+}
+
+impl From<dmp::ProcessedDownwardMessagesAcceptanceErr> for AcceptanceCheckErr {
+	fn from(_: dmp::ProcessedDownwardMessagesAcceptanceErr) -> Self {
+		Self::ProcessedDownwardMessages
+	}
+}
+
+impl From<UmpAcceptanceCheckErr> for AcceptanceCheckErr {
+	fn from(_: UmpAcceptanceCheckErr) -> Self {
+		Self::UpwardMessages
+	}
+}
+
+impl<BlockNumber> From<hrmp::HrmpWatermarkAcceptanceErr<BlockNumber>> for AcceptanceCheckErr {
+	fn from(_: hrmp::HrmpWatermarkAcceptanceErr<BlockNumber>) -> Self {
+		Self::HrmpWatermark
+	}
+}
+
+impl From<hrmp::OutboundHrmpAcceptanceErr> for AcceptanceCheckErr {
+	fn from(_: hrmp::OutboundHrmpAcceptanceErr) -> Self {
+		Self::OutboundHrmp
+	}
 }
 
 /// An error returned by [`Pallet::check_upward_messages`] that indicates a violation of one of
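The shape of this refactor is worth spelling out: `derive_more::From` existed only so `?` could convert the sub-check error types into `AcceptanceCheckErr`, and the hand-written `From` impls preserve exactly that conversion while the variants stop storing payloads nobody reads (which in turn lets the enum drop its `BlockNumber` parameter). A reduced sketch with stand-in types (not the real pallet code):

```
// Stand-in for a sub-check error such as `UmpAcceptanceCheckErr`.
#[allow(dead_code)]
struct UmpError(String);

#[derive(Debug)]
enum AcceptanceCheckErr {
    UpwardMessages,
}

// What `#[derive(derive_more::From)]` used to generate, minus the payload.
impl From<UmpError> for AcceptanceCheckErr {
    fn from(_: UmpError) -> Self {
        Self::UpwardMessages
    }
}

fn check_upward_messages() -> Result<(), UmpError> {
    Err(UmpError("too many upward messages".into()))
}

fn check_candidate() -> Result<(), AcceptanceCheckErr> {
    // `?` still auto-converts via `From`, exactly as it did with the derive.
    check_upward_messages()?;
    Ok(())
}

fn main() {
    println!("{:?}", check_candidate()); // Err(UpwardMessages)
}
```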
@@ -1145,7 +1168,7 @@ const fn availability_threshold(n_validators: usize) -> usize {
 	supermajority_threshold(n_validators)
 }
 
-impl<BlockNumber> AcceptanceCheckErr<BlockNumber> {
+impl AcceptanceCheckErr {
 	/// Returns the same error so that it can be threaded through a needle of `DispatchError` and
 	/// ultimately returned from a `Dispatchable`.
 	fn strip_into_dispatch_err<T: Config>(self) -> Error<T> {
@@ -1154,10 +1177,10 @@ impl<BlockNumber> AcceptanceCheckErr<BlockNumber> {
 			HeadDataTooLarge => Error::<T>::HeadDataTooLarge,
 			PrematureCodeUpgrade => Error::<T>::PrematureCodeUpgrade,
 			NewCodeTooLarge => Error::<T>::NewCodeTooLarge,
-			ProcessedDownwardMessages(_) => Error::<T>::IncorrectDownwardMessageHandling,
-			UpwardMessages(_) => Error::<T>::InvalidUpwardMessages,
-			HrmpWatermark(_) => Error::<T>::HrmpWatermarkMishandling,
-			OutboundHrmp(_) => Error::<T>::InvalidOutboundHrmp,
+			ProcessedDownwardMessages => Error::<T>::IncorrectDownwardMessageHandling,
+			UpwardMessages => Error::<T>::InvalidUpwardMessages,
+			HrmpWatermark => Error::<T>::HrmpWatermarkMishandling,
+			OutboundHrmp => Error::<T>::InvalidOutboundHrmp,
 		}
 	}
 }
@@ -1300,7 +1323,7 @@ impl<T: Config> CandidateCheckContext<T> {
 		upward_messages: &[primitives::UpwardMessage],
 		hrmp_watermark: BlockNumberFor<T>,
 		horizontal_messages: &[primitives::OutboundHrmpMessage<ParaId>],
-	) -> Result<(), AcceptanceCheckErr<BlockNumberFor<T>>> {
+	) -> Result<(), AcceptanceCheckErr> {
 		ensure!(
 			head_data.0.len() <= self.config.max_head_data_size as _,
 			AcceptanceCheckErr::HeadDataTooLarge,
diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs
index e220aaac508d..2d6264a48929 100644
--- a/substrate/client/consensus/aura/src/lib.rs
+++ b/substrate/client/consensus/aura/src/lib.rs
@@ -579,15 +579,15 @@ mod tests {
 	type Error = sp_blockchain::Error;
 
 	struct DummyFactory(Arc<TestClient>);
-	struct DummyProposer(u64, Arc<TestClient>);
+	struct DummyProposer(Arc<TestClient>);
 
 	impl Environment<TestBlock> for DummyFactory {
 		type Proposer = DummyProposer;
 		type CreateProposer = futures::future::Ready<Result<DummyProposer, Error>>;
 		type Error = Error;
 
-		fn init(&mut self, parent_header: &<TestBlock as BlockT>::Header) -> Self::CreateProposer {
-			futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone())))
+		fn init(&mut self, _: &<TestBlock as BlockT>::Header) -> Self::CreateProposer {
+			futures::future::ready(Ok(DummyProposer(self.0.clone())))
 		}
 	}
 
@@ -604,9 +604,9 @@ mod tests {
 			_: Duration,
 			_: Option<usize>,
 		) -> Self::Proposal {
-			let r = BlockBuilderBuilder::new(&*self.1)
-				.on_parent_block(self.1.chain_info().best_hash)
-				.fetch_parent_block_number(&*self.1)
+			let r = BlockBuilderBuilder::new(&*self.0)
+				.on_parent_block(self.0.chain_info().best_hash)
+				.fetch_parent_block_number(&*self.0)
 				.unwrap()
 				.with_inherent_digests(digests)
 				.build()
diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs
index 40d901b2fec6..bc3023fc0281 100644
--- a/substrate/client/consensus/grandpa/src/communication/tests.rs
+++ b/substrate/client/consensus/grandpa/src/communication/tests.rs
@@ -51,10 +51,8 @@ use std::{
 
 #[derive(Debug)]
 pub(crate) enum Event {
-	EventStream(TracingUnboundedSender<NetworkEvent>),
 	WriteNotification(PeerId, Vec<u8>),
 	Report(PeerId, ReputationChange),
-	Announce(Hash),
 }
 
 #[derive(Clone)]
@@ -146,15 +144,13 @@ impl NetworkEventStream for TestNetwork {
 		&self,
 		_name: &'static str,
 	) -> Pin<Box<dyn Stream<Item = NetworkEvent> + Send>> {
-		let (tx, rx) = tracing_unbounded("test", 100_000);
-		let _ = self.sender.unbounded_send(Event::EventStream(tx));
-		Box::pin(rx)
+		futures::stream::pending().boxed()
 	}
 }
 
 impl NetworkBlock<Hash, NumberFor<Block>> for TestNetwork {
-	fn announce_block(&self, hash: Hash, _data: Option<Vec<u8>>) {
-		let _ = self.sender.unbounded_send(Event::Announce(hash));
+	fn announce_block(&self, _: Hash, _data: Option<Vec<u8>>) {
+		unimplemented!();
 	}
 
 	fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor<Block>) {
diff --git a/substrate/frame/contracts/fixtures/build.rs b/substrate/frame/contracts/fixtures/build.rs
index 19aff37c1601..baaeaf034203 100644
--- a/substrate/frame/contracts/fixtures/build.rs +++ b/substrate/frame/contracts/fixtures/build.rs @@ -163,7 +163,7 @@ fn invoke_cargo_fmt<'a>( ) -> Result<()> { // If rustfmt is not installed, skip the check. if !Command::new("rustup") - .args(["nightly-2024-01-22", "run", "rustfmt", "--version"]) + .args(["nightly-2024-04-10", "run", "rustfmt", "--version"]) .output() .map_or(false, |o| o.status.success()) { @@ -171,7 +171,7 @@ fn invoke_cargo_fmt<'a>( } let fmt_res = Command::new("rustup") - .args(["nightly-2024-01-22", "run", "rustfmt", "--check", "--config-path"]) + .args(["nightly-2024-04-10", "run", "rustfmt", "--check", "--config-path"]) .arg(config_path) .args(files) .output() @@ -186,7 +186,7 @@ fn invoke_cargo_fmt<'a>( eprintln!("{}\n{}", stdout, stderr); eprintln!( "Fixtures files are not formatted.\n - Please run `rustup nightly-2024-01-22 run rustfmt --config-path {} {}/*.rs`", + Please run `rustup nightly-2024-04-10 run rustfmt --config-path {} {}/*.rs`", config_path.display(), contract_dir.display() ); diff --git a/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr b/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr index 87d4f476a60d..30f1289767fc 100644 --- a/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr +++ b/substrate/frame/support/test/tests/benchmark_ui/invalid_origin.stderr @@ -2,7 +2,7 @@ error[E0277]: the trait bound `::RuntimeOrigin: --> tests/benchmark_ui/invalid_origin.rs:23:1 | 23 | #[benchmarks] - | ^^^^^^^^^^^^^ the trait `From<{integer}>` is not implemented for `::RuntimeOrigin` + | ^^^^^^^^^^^^^ the trait `From<{integer}>` is not implemented for `::RuntimeOrigin`, which is required by `{integer}: Into<_>` | = note: required for `{integer}` to implement `Into<::RuntimeOrigin>` = note: this error originates in the attribute macro `benchmarks` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 09c4d290ef5c..96504b7ce775 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -23,133 +23,139 @@ error: use of deprecated constant `WhereSection::_w`: error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` + | + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime` | note: required by a bound in `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { | ^^^^^^ required by this bound in `Event` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Clone` --> $RUST/core/src/clone.rs | | pub trait Clone: Sized { | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `EncodeLike` --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Decode` --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` | -note: required because it appears within the type `Event` +note: required because it appears within the type `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { @@ -159,22 +165,21 @@ note: required by a bound in `From` | | pub trait From: Sized { | ^ required by this bound in `From` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `frame_system::Event` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `frame_system::Event`, the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: Sized` | -note: required because it appears within the type `Event` +note: required because it appears within the type `frame_system::Event` --> $WORKSPACE/substrate/frame/system/src/lib.rs | | pub enum Event { @@ -184,22 +189,7 @@ note: required by a bound in `TryInto` | | pub trait TryInto: Sized { | ^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -212,134 +202,157 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied error[E0277]: the trait bound `RawOrigin<_>: TryFrom` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ the trait `TryFrom` is not implemented for `RawOrigin<_>` | = help: the trait `TryFrom` is implemented for `RawOrigin<::AccountId>` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `Pallet: Callable` | = help: the trait `Callable` is implemented for `Pallet` = note: required for `Pallet` to implement `Callable` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Clone` --> $RUST/core/src/clone.rs | | pub trait Clone: Sized { | ^^^^^ required by this bound in `Clone` - = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `Clone` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `EncodeLike` --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `Decode` --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... 
| +27 | | } +28 | | } + | |_^ +note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` + --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | + | type Config; + | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:26:3 @@ -353,165 +366,124 @@ note: required by a bound in `GenesisConfig` | pub struct GenesisConfig { | ^^^^^^ required by this bound in `GenesisConfig` +error[E0277]: the trait bound `Runtime: Config` is not satisfied + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` + | + = note: required for `Pallet` to implement `Callable` +note: required because it appears within the type `RuntimeCall` + --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 + | +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, +... | +27 | | } +28 | | } + | |_^ +note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` + --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs + | + | type Call; + | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `Result` --> $RUST/core/src/result.rs | | pub enum Result { | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied in `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ within `RuntimeEvent`, the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeEvent: Sized` | note: required because it appears within the type `RuntimeEvent` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ note: required by a bound in `TryInto` --> $RUST/core/src/convert/mod.rs | | pub trait TryInto: Sized { | ^^^^^ required by this bound in `TryInto` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... | +27 | | } +28 | | } + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `RuntimeCall: Sized` | = note: required for `Pallet` to implement `Callable` note: required because it appears within the type `RuntimeCall` --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation +20 | / construct_runtime! { +21 | | pub struct Runtime where +22 | | Block = Block, +23 | | NodeBlock = Block, ... 
| +27 | | } +28 | | } + | |_^ note: required by a bound in `Result` --> $RUST/core/src/result.rs | | pub enum Result { | ^ required by this bound in `Result` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | -note: required by a bound in `frame_support::sp_runtime::traits::Dispatchable::Config` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs - | - | type Config; - | ^^^^^^^^^^^^ required by this bound in `Dispatchable::Config` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0277]: the trait bound `Runtime: Config` is not satisfied - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... | - | - = note: required for `Pallet` to implement `Callable` -note: required because it appears within the type `RuntimeCall` - --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 - | -20 | // construct_runtime! { -21 | || pub struct Runtime where -22 | || Block = Block, -23 | || NodeBlock = Block, -... || -27 | || } -28 | || } - | ||_- in this macro invocation -... 
| -note: required by a bound in `frame_support::pallet_prelude::ValidateUnsigned::Call` - --> $WORKSPACE/substrate/primitives/runtime/src/traits.rs - | - | type Call; - | ^^^^^^^^^^ required by this bound in `ValidateUnsigned::Call` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::codec::Decode` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr index 6160f8234a35..dde58ba6959b 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.stderr @@ -5,23 +5,16 @@ error: The number of pallets exceeds the maximum number of tuple elements. To in | ^^^ error: recursion limit reached while expanding `frame_support::__private::tt_return!` - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:22:1 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:66:1 | -22 | / #[frame_support::pallet] -23 | | mod pallet { -24 | | #[pallet::config] -25 | | pub trait Config: frame_system::Config {} +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, ... | -66 | |/ construct_runtime! { -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -... 
|| -180 | || } -181 | || } - | ||_^ - | |_| - | in this macro invocation +180 | | } +181 | | } + | |_^ | = help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`$CRATE`) = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr index ebbb9ffb0eb0..75116f719195 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr @@ -10,4 +10,4 @@ error[E0080]: evaluation of constant value failed 97 | | } | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | - = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index b9cf58542f20..42e72bc90da7 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,4 +13,4 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 72 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index f527cc2ff773..add71c2197ef 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 72 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `frame_support::construct_runtime` (in Nightly builds, run 
with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:70:3 @@ -26,54 +26,50 @@ error[E0599]: no variant or associated item named `Pallet` found for enum `Runti | || -^^^^^^ variant or associated item not found in `RuntimeCall` | ||________| | | -... | +71 | | } +72 | | } + | |__- variant or associated item `Pallet` not found for this enum error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | -28 | pub struct Pallet(_); - | -------------------- function or associated item `pre_dispatch` not found for this struct +28 | pub struct Pallet(_); + | -------------------- function or associated item `pre_dispatch` not found for this struct ... -66 | construct_runtime! { - | __^ - | | _| - | || -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -71 | || } -72 | || } - | ||_- in this macro invocation -... | +66 | construct_runtime! { + | _^ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `pre_dispatch`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | -28 | pub struct Pallet(_); - | -------------------- function or associated item `validate_unsigned` not found for this struct +28 | pub struct Pallet(_); + | -------------------- function or associated item `validate_unsigned` not found for this struct ... -66 | construct_runtime! { - | __^ - | | _| - | || -67 | || pub struct Runtime -68 | || { -69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -71 | || } -72 | || } - | ||_- in this macro invocation -... | +66 | construct_runtime! 
{ + | _^ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } + | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope = note: the following traits define an item `validate_unsigned`, perhaps you need to implement one of them: candidate #1: `SignedExtension` candidate #2: `ValidateUnsigned` - = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `frame_support::construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index 48a6413c3ac5..b19147078051 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -24,6 +24,7 @@ use frame_support::{ }; #[derive(RuntimeDebugNoBound)] +#[allow(dead_code)] struct Unnamed(u64); #[test] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 40f8f1298304..2a4ceecd8fa4 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -18,7 +18,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 5744c6362350..fc993e9ff68f 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -18,7 +18,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` @@ -41,7 +41,7 @@ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is | ------------------------ required by a bound introduced by this call ... 
38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | ^^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar`, which is required by `::Bar: Encode` | = note: required for `::Bar` to implement `Encode` @@ -49,6 +49,6 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is --> tests/pallet_ui/call_argument_invalid_bound_2.rs:38:42 | 38 | pub fn foo(origin: OriginFor, _bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar`, which is required by `::Bar: Decode` | = note: required for `::Bar` to implement `Decode` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index b58e4516bceb..d6486a490794 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -18,7 +18,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` 40 | pub fn foo(origin: OriginFor, _bar: Bar) -> DispatchResultWithPostInfo { | ^^^^ `Bar` cannot be formatted using `{:?}` | - = help: the trait `std::fmt::Debug` is not implemented for `Bar` + = help: the trait `std::fmt::Debug` is not implemented for `Bar`, which is required by `&Bar: std::fmt::Debug` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required for `&Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 02ead305dd81..629fefebbe2c 100644 --- a/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -35,7 +35,7 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied ... 
| 35 | | #[pallet::storage] 36 | | type MyStorage = StorageValue<_, Vec>; - | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec` + | |__________________^ the trait `MaxEncodedLen` is not implemented for `Vec`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 44660d269060..e9c2eae686ba 100644 --- a/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -16,6 +16,6 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` 41 | B { b: T::Bar }, | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | - = help: the trait `std::fmt::Debug` is not implemented for `::Bar` + = help: the trait `std::fmt::Debug` is not implemented for `::Bar`, which is required by `&::Bar: std::fmt::Debug` = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&&::Bar` to `&dyn std::fmt::Debug` diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index d269e6d2726d..c8c41e805014 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -31,7 +31,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: @@ -58,7 +58,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied ... 
| 38 | | #[pallet::storage] 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -81,7 +81,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `TypeInfo` is not implemented for `Bar` + | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: bool @@ -102,7 +102,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -119,7 +119,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: @@ -141,7 +141,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -164,7 +164,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -181,7 +181,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `EncodeLike`: @@ -203,7 +203,7 @@ error[E0277]: the trait 
bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: Box diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 13d761d65d20..08b35eb8ed15 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -31,7 +31,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `EncodeLike`: @@ -58,7 +58,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied ... 
| 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: PartialStorageInfoTrait` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -81,7 +81,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `TypeInfo` is not implemented for `Bar` + | |____________^ the trait `TypeInfo` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `TypeInfo`: bool @@ -102,7 +102,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -119,7 +119,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `EncodeLike`: @@ -141,7 +141,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageEntryMetadataBuilder` | = help: the following other types implement trait `WrapperTypeEncode`: Box @@ -164,7 +164,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeDecode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeDecode`: Box @@ -181,7 +181,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `EncodeLike` is not implemented for `Bar` + | |____________^ the trait `EncodeLike` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait 
`EncodeLike`: @@ -203,7 +203,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied 38 | #[pallet::storage] | _______________^ 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | |____________^ the trait `WrapperTypeEncode` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: TryDecodeEntireStorage` | = help: the following other types implement trait `WrapperTypeEncode`: Box diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 504db21feeb2..042a6f67fd31 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied ... | 38 | | #[pallet::storage] 39 | | type Foo = StorageValue<_, Bar>; - | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 6fd0b1959c86..9f57b85f3a8a 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -9,7 +9,7 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied ... 
| 41 | | #[pallet::storage] 42 | | type Foo = StorageNMap<_, Key, u32>; - | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar` + | |____________^ the trait `MaxEncodedLen` is not implemented for `Bar`, which is required by `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, NMapKey, u32>: StorageInfoTrait` | = help: the following other types implement trait `MaxEncodedLen`: bool diff --git a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 788d1807f3ba..535bbb178d5f 100644 --- a/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/substrate/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -9,10 +9,12 @@ note: the struct `RuntimeVersion` is defined here | | use sp_version::RuntimeVersion; | ^^^^^^^^^^^^^^^^^^^^^^^^^^ -help: consider importing one of these items instead +help: consider importing this struct instead + | +37 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ +help: import `RuntimeVersion` directly | -37 | fn version() -> sp_api::__private::RuntimeVersion { - | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 37 | fn version() -> sp_version::RuntimeVersion { | ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index b4df7c068768..f4e0f3b0afb0 100644 --- a/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/substrate/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -9,10 +9,12 @@ note: the struct `RuntimeVersion` is defined here | | use sp_version::RuntimeVersion; | ^^^^^^^^^^^^^^^^^^^^^^^^^^ -help: consider importing one of these items instead +help: consider importing this struct instead + | +39 | fn version() -> sp_version::RuntimeVersion { + | ~~~~~~~~~~~~~~~~~~~~~~~~~~ +help: import `RuntimeVersion` directly | -39 | fn version() -> sp_api::__private::RuntimeVersion { - | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 39 | fn version() -> sp_version::RuntimeVersion { | ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -30,8 +32,8 @@ note: type in trait | 27 | fn test(data: u64); | ^^^ - = note: expected signature `fn(u64)` - found signature `fn(&u64)` + = note: expected signature `fn(_)` + found signature `fn(&_)` error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:33:11 From ff906127ab513bb42a4288968e0f421f630809e0 Mon Sep 17 00:00:00 2001 From: Egor_P Date: Thu, 18 Apr 2024 12:30:31 +0200 Subject: [PATCH 09/74] Improve changelog in the release notes (#4179) This PR adds a description to each section of the Changelog part of the release notes. The changes are based on feedback that it wasn't clear what exactly `Node Dev`, `Runtime Dev`, etc. mean. Now, the description for each of those parts is taken directly from the `prdoc` schema.
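Illustration only, not part of the patch: the extraction the updated `build-changelogs.sh` performs can be sketched in a few lines of shell, assuming, as the script in the diff below does, that the schema keeps its audiences under `$defs.audience_id.oneOf` with `const` and `description` fields:

```bash
# Sketch only: list each audience together with its description from the prdoc schema.
SCHEMA_URL=https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
curl -s "$SCHEMA_URL" |
  sed 's|^//.*||' |  # the schema file starts with // comments; strip them to obtain plain JSON
  jq -r '."$defs".audience_id.oneOf[] | "\(.const): \(.description)"'
```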
Closes https://github.com/paritytech/release-engineering/issues/197 --- .../release-30_publish_release_draft.yml | 9 +--- scripts/release/build-changelogs.sh | 41 +++++++++++-------- scripts/release/templates/audience.md.tera | 2 + 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/.github/workflows/release-30_publish_release_draft.yml b/.github/workflows/release-30_publish_release_draft.yml index 12891ef70af3..430b1e266467 100644 --- a/.github/workflows/release-30_publish_release_draft.yml +++ b/.github/workflows/release-30_publish_release_draft.yml @@ -42,7 +42,6 @@ jobs: URL=https://github.com/chevdor/tera-cli/releases/download/v0.2.4/tera-cli_linux_amd64.deb wget $URL -O tera.deb sudo dpkg -i tera.deb - tera --version - name: Download artifacts uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 @@ -70,7 +69,7 @@ jobs: export REF1=$(get_latest_release_tag) if [[ -z "${{ inputs.version }}" ]]; then - export REF2="${{ github.ref }}" + export REF2="${{ github.ref_name }}" else export REF2="${{ inputs.version }}" fi @@ -79,10 +78,6 @@ jobs: ./scripts/release/build-changelogs.sh - echo "Checking the folder state" - pwd - ls -la scripts/release - - name: Archive artifact context.json uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 with: @@ -151,5 +146,5 @@ jobs: access_token: ${{ secrets.RELEASENOTES_MATRIX_V2_ACCESS_TOKEN }} server: m.parity.io message: | - **New version of polkadot tagged**: ${{ github.ref }}
+ **New version of polkadot tagged**: ${{ github.ref_name }}
Draft release created: ${{ needs.publish-release-draft.outputs.release_url }} diff --git a/scripts/release/build-changelogs.sh b/scripts/release/build-changelogs.sh index 840543919362..d9a1f11d01e9 100755 --- a/scripts/release/build-changelogs.sh +++ b/scripts/release/build-changelogs.sh @@ -2,11 +2,10 @@ export PRODUCT=polkadot export VERSION=${VERSION:-1.5.0} -export ENGINE=${ENGINE:-docker} +export ENGINE=${ENGINE:-podman} export REF1=${REF1:-'HEAD'} export REF2=${REF2} export RUSTC_STABLE=${RUSTC_STABLE:-'1.0'} -export RUSTC_NIGHTLY=${RUSTC_NIGHTLY:-'1.0'} PROJECT_ROOT=`git rev-parse --show-toplevel` echo $PROJECT_ROOT @@ -27,35 +26,43 @@ echo -e "OUTPUT: \t\t$OUTPUT" mkdir -p $OUTPUT $ENGINE run --rm -v ${PROJECT_ROOT}:/repo paritytech/prdoc load -d "prdoc/$VERSION" --json > $DATA_JSON -# ls -al $DATA_JSON cat $DATA_JSON | jq ' { "prdoc" : .}' > $CONTEXT_JSON -# ls -al $CONTEXT_JSON -# Fetch the list of valid audiences +# Fetch the list of valid audiences and their descriptions SCHEMA_URL=https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json SCHEMA=$(curl -s $SCHEMA_URL | sed 's|^//.*||') -AUDIENCE_ARRAY=$(echo -E $SCHEMA | jq -r '."$defs".audience.oneOf[] | .const') - -readarray -t audiences < <(echo "$AUDIENCE_ARRAY") -declare -p audiences - - -# Generate a changelog -echo "Generating changelog..." -tera -t "${TEMPLATE_CHANGELOG}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/changelog.md" -echo "Changelog ready in $OUTPUT/changelog.md" +aud_desc_array=() +while IFS= read -r line; do + audience=$(jq -r '.const' <<< "$line" ) + description=$(jq -r '.description' <<< "$line") + if [ -n "$audience" ] && [ -n "$description" ]; then + aud_desc_array+=("($audience; $description)") + fi +done < <(jq -c '."$defs".audience_id.oneOf[]' <<< "$SCHEMA") # Generate a release notes doc per audience -for audience in "${audiences[@]}"; do +for tuple in "${aud_desc_array[@]}"; do + audience=$(echo "$tuple" | cut -d ';' -f 1 | sed 's/(//') audience_id="$(tr [A-Z] [a-z] <<< "$audience")" audience_id="$(tr ' ' '_' <<< "$audience_id")" + + description=$(echo "$tuple" | cut -d ';' -f 2 | sed 's/)//') + echo "Processing audience: $audience ($audience_id)" - export TARGET_AUDIENCE=$audience + export TARGET_AUDIENCE="$audience" + export AUDIENCE_DESC="**These changes are relevant to:** $description" + tera -t "${TEMPLATE_AUDIENCE}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_${audience_id}.md" cat "$OUTPUT/relnote_${audience_id}.md" >> "$PROJECT_ROOT/scripts/release/templates/changelog.md" done + +# Generate a changelog containing list of the commits +echo "Generating changelog..." 
+tera -t "${TEMPLATE_CHANGELOG}" --env --env-key env "${CONTEXT_JSON}" > "$OUTPUT/relnote_commits.md" +echo "Changelog ready in $OUTPUT/relnote_commits.md" + # Show the files tree -s -h -c $OUTPUT/ diff --git a/scripts/release/templates/audience.md.tera b/scripts/release/templates/audience.md.tera index 0b47850e3a37..237643cfa392 100644 --- a/scripts/release/templates/audience.md.tera +++ b/scripts/release/templates/audience.md.tera @@ -1,5 +1,7 @@ ### Changelog for `{{ env.TARGET_AUDIENCE }}` +{{ env.AUDIENCE_DESC }} + {% for file in prdoc -%} {% for doc_item in file.content.doc %} {%- if doc_item.audience == env.TARGET_AUDIENCE %} From 4ddeda1e4177c910be5938227aee711efaf559aa Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:22:00 +0200 Subject: [PATCH 10/74] [ci] Use ci-unified reference (#4196) close https://github.com/paritytech/ci_cd/issues/974 --- .gitlab-ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 77d31d96ee10..5e57dd86f141 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,8 +21,7 @@ workflow: - if: $CI_COMMIT_BRANCH variables: - # CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] - CI_IMAGE: "docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" + CI_IMAGE: !reference [.ci-unified, variables, CI_IMAGE] # BUILDAH_IMAGE is defined in group variables BUILDAH_COMMAND: "buildah --storage-driver overlay2" RELENG_SCRIPTS_BRANCH: "master" From 91d4a207af43f8f81f56e4f24af74f7c6f590148 Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 18 Apr 2024 16:32:14 +0200 Subject: [PATCH 11/74] chain-selection: allow reverting current block (#4103) Block reversion of the current block is technically possible as can be seen from https://github.com/paritytech/polkadot-sdk/blob/39b1f50f1c251def87c1625d68567ed252dc6272/polkadot/runtime/parachains/src/disputes.rs#L1215-L1223 - [x] Fix the test --- polkadot/node/core/chain-selection/src/lib.rs | 6 +-- .../node/core/chain-selection/src/tests.rs | 43 +++++++++++++++++-- .../node/core/chain-selection/src/tree.rs | 42 ++++++++++-------- 3 files changed, 65 insertions(+), 26 deletions(-) diff --git a/polkadot/node/core/chain-selection/src/lib.rs b/polkadot/node/core/chain-selection/src/lib.rs index 6f864fefb611..07c245e839bf 100644 --- a/polkadot/node/core/chain-selection/src/lib.rs +++ b/polkadot/node/core/chain-selection/src/lib.rs @@ -619,7 +619,7 @@ async fn handle_active_leaf( // Extract all reversion logs from a header in ascending order. // -// Ignores logs with number >= the block header number. +// Ignores logs with number > the block header number. 
fn extract_reversion_logs(header: &Header) -> Vec { let number = header.number; let mut logs = header @@ -639,14 +639,14 @@ fn extract_reversion_logs(header: &Header) -> Vec { None }, - Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) if b <= number => Some(b), Ok(Some(ConsensusLog::Revert(b))) => { gum::warn!( target: LOG_TARGET, revert_target = b, block_number = number, block_hash = ?header.hash(), - "Block issued invalid revert digest targeting itself or future" + "Block issued invalid revert digest targeting future" ); None diff --git a/polkadot/node/core/chain-selection/src/tests.rs b/polkadot/node/core/chain-selection/src/tests.rs index bc998f268a0d..1fe87f04cd58 100644 --- a/polkadot/node/core/chain-selection/src/tests.rs +++ b/polkadot/node/core/chain-selection/src/tests.rs @@ -966,19 +966,54 @@ fn ancestor_of_unviable_is_not_leaf_if_has_children() { } #[test] -fn self_and_future_reversions_are_ignored() { +fn self_reversions_are_not_ignored() { test_harness(|backend, _, mut virtual_overseer| async move { let finalized_number = 0; let finalized_hash = Hash::repeat_byte(0); // F <- A1 <- A2 <- A3. // - // A3 reverts itself and future blocks. ignored. + // A3 reverts itself + + let (_, chain_a) = + construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { + if h.number == 3 { + add_reversions(h, vec![3]) + } + }); + + let a2_hash = chain_a.iter().rev().nth(1).unwrap().0.hash(); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ) + .await; + + assert_backend_contains(&backend, chain_a.iter().map(|(h, _)| h)); + assert_leaves(&backend, vec![a2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a2_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn future_reversions_are_ignored() { + test_harness(|backend, _, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts future blocks. ignored. let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { if h.number == 3 { - add_reversions(h, vec![3, 4, 100]) + add_reversions(h, vec![4, 100]) } }); @@ -1006,7 +1041,7 @@ fn revert_finalized_is_ignored() { // F <- A1 <- A2 <- A3. // - // A3 reverts itself and future blocks. ignored. + // A3 reverts finalized F and its ancestors. ignored. let (a3_hash, chain_a) = construct_chain_on_base(vec![1, 2, 3], finalized_number, finalized_hash, |h| { diff --git a/polkadot/node/core/chain-selection/src/tree.rs b/polkadot/node/core/chain-selection/src/tree.rs index b4aba30368a6..1eb6c13a7f82 100644 --- a/polkadot/node/core/chain-selection/src/tree.rs +++ b/polkadot/node/core/chain-selection/src/tree.rs @@ -236,7 +236,7 @@ fn propagate_viability_update( Ok(()) } -/// Imports a new block and applies any reversions to ancestors. +/// Imports a new block and applies any reversions to ancestors or the block itself. 
pub(crate) fn import_block( backend: &mut OverlayedBackend, block_hash: Hash, @@ -246,25 +246,29 @@ pub(crate) fn import_block( weight: BlockWeight, stagnant_at: Timestamp, ) -> Result<(), Error> { - add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?; - apply_ancestor_reversions(backend, block_hash, block_number, reversion_logs)?; + let block_entry = + add_block(backend, block_hash, block_number, parent_hash, weight, stagnant_at)?; + apply_reversions(backend, block_entry, reversion_logs)?; Ok(()) } // Load the given ancestor's block entry, in descending order from the `block_hash`. -// The ancestor_number must be at least one block less than the `block_number`. +// The ancestor_number must be not higher than the `block_entry`'s. // // The returned entry will be `None` if the range is invalid or any block in the path had // no entry present. If any block entry was missing, it can safely be assumed to // be finalized. fn load_ancestor( backend: &mut OverlayedBackend, - block_hash: Hash, - block_number: BlockNumber, + block_entry: &BlockEntry, ancestor_number: BlockNumber, ) -> Result, Error> { - if block_number <= ancestor_number { + let block_hash = block_entry.block_hash; + let block_number = block_entry.block_number; + if block_number == ancestor_number { + return Ok(Some(block_entry.clone())) + } else if block_number < ancestor_number { return Ok(None) } @@ -300,7 +304,7 @@ fn add_block( parent_hash: Hash, weight: BlockWeight, stagnant_at: Timestamp, -) -> Result<(), Error> { +) -> Result { let mut leaves = backend.load_leaves()?; let parent_entry = backend.load_block_entry(&parent_hash)?; @@ -308,7 +312,7 @@ fn add_block( parent_entry.as_ref().and_then(|parent| parent.non_viable_ancestor_for_child()); // 1. Add the block to the DB assuming it's not reverted. - backend.write_block_entry(BlockEntry { + let block_entry = BlockEntry { block_hash, block_number, parent_hash, @@ -319,7 +323,8 @@ fn add_block( approval: Approval::Unapproved, }, weight, - }); + }; + backend.write_block_entry(block_entry.clone()); // 2. Update leaves if inherited viability is fine. if inherited_viability.is_none() { @@ -344,26 +349,25 @@ fn add_block( stagnant_at_list.push(block_hash); backend.write_stagnant_at(stagnant_at, stagnant_at_list); - Ok(()) + Ok(block_entry) } /// Assuming that a block is already imported, accepts the number of the block /// as well as a list of reversions triggered by the block in ascending order. -fn apply_ancestor_reversions( +fn apply_reversions( backend: &mut OverlayedBackend, - block_hash: Hash, - block_number: BlockNumber, + block_entry: BlockEntry, reversions: Vec, ) -> Result<(), Error> { // Note: since revert numbers are in ascending order, the expensive propagation // of unviability is only heavy on the first log. 
for revert_number in reversions { - let maybe_block_entry = load_ancestor(backend, block_hash, block_number, revert_number)?; - if let Some(block_entry) = &maybe_block_entry { + let maybe_block_entry = load_ancestor(backend, &block_entry, revert_number)?; + if let Some(entry) = &maybe_block_entry { gum::trace!( target: LOG_TARGET, ?revert_number, - revert_hash = ?block_entry.block_hash, + revert_hash = ?entry.block_hash, "Block marked as reverted via scraped on-chain reversions" ); } @@ -372,8 +376,8 @@ fn apply_ancestor_reversions( maybe_block_entry, None, revert_number, - Some(block_hash), - Some(block_number), + Some(block_entry.block_hash), + Some(block_entry.block_number), )?; } From 9f12d2196e156e8822e5373975644aacfc266d14 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 18 Apr 2024 17:57:22 +0200 Subject: [PATCH 12/74] [ci] Use native git cli in cargo (#4200) More details can be found [here](https://github.com/paritytech/ci_cd/issues/939#issuecomment-2064061845) --- .github/workflows/test-github-actions.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml index 09cb4a25b9a3..c8ce49cb462b 100644 --- a/.github/workflows/test-github-actions.yml +++ b/.github/workflows/test-github-actions.yml @@ -8,6 +8,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true +env: + CARGO_NET_GIT_FETCH_WITH_CLI: true + jobs: test-linux-stable-int: runs-on: arc-runners-polkadot-sdk From 0e552893d0f656f83d366ae9118aaeb0f898aabf Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:23 +0300 Subject: [PATCH 13/74] Fix next_retry busy waiting on first retry (#4192) The `next_retry_time` gets populated when a request receives a timeout or any other error; after that, `next_retry` checks all requests in the queue and returns the smallest one, which then gets used to move the main loop by creating a Delay ``` futures_timer::Delay::new(instant.saturating_duration_since(Instant::now())).await, ``` However, when we retry a task for the first time we still keep it in the queue and mark it as in flight, so its next_retry_time would be the oldest and smaller than `now`; the Delay then always triggers immediately, making the main loop essentially busy-wait until we receive a response for the retried request. Fix this by excluding the tasks that are already in-flight. --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- .../statement-distribution/src/v2/requests.rs | 2 +- .../src/v2/tests/requests.rs | 26 ++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/requests.rs b/polkadot/node/network/statement-distribution/src/v2/requests.rs index fe270c8a58e8..1ed18ffd42a9 100644 --- a/polkadot/node/network/statement-distribution/src/v2/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/requests.rs @@ -288,7 +288,7 @@ impl RequestManager { /// Returns an instant at which the next request to be retried will be ready.
pub fn next_retry_time(&mut self) -> Option { let mut next = None; - for (_id, request) in &self.requests { + for (_id, request) in self.requests.iter().filter(|(_id, request)| !request.in_flight) { if let Some(next_retry_time) = request.next_retry_time { if next.map_or(true, |next| next_retry_time < next) { next = Some(next_retry_time); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index dc2c8f55290b..8cf139802148 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -2606,7 +2606,31 @@ fn should_delay_before_retrying_dropped_requests() { // Sleep for the given amount of time. This should reset the delay for the first candidate. futures_timer::Delay::new(REQUEST_RETRY_DELAY).await; - // We re-try the first request. + // We re-try the first request the second time drop it again. + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateV2(outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); + assert_eq!(outgoing.payload.mask, mask); + } + ); + } + ); + + assert_matches!( + overseer_recv_with_timeout(&mut overseer, Duration::from_millis(100)).await, + None + ); + + // Sleep for the given amount of time. This should reset the delay for the first candidate. + futures_timer::Delay::new(REQUEST_RETRY_DELAY).await; + + // We re-try the first request, for the third time, so let's answer to it. { let statements = vec![ state From 37e338f0469c2ca5b716c4423d8b683e237ead21 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:34 +0300 Subject: [PATCH 14/74] approval-voting: Make sure we always mark approved candidates approved in a different relay chain context (#4153) ... see the following comment for more detail on why this is needed: https://github.com/paritytech/polkadot-sdk/issues/4149#issuecomment-2058472444 ## TODO: - [x] Unit tests - [x] Replicate scenario from https://github.com/paritytech/polkadot-sdk/issues/4149 and confirm this fixes it: https://github.com/paritytech/polkadot-sdk/issues/4149 [Replicated on a zombienet with some hacked nodes; we can end up in this state where no wakeup is scheduled and the nodes are pending new assignments.] --------- Signed-off-by: Alexandru Gheorghe Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- polkadot/node/core/approval-voting/src/lib.rs | 51 +++++ .../node/core/approval-voting/src/tests.rs | 182 +++++++++++++++++- 2 files changed, 232 insertions(+), 1 deletion(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 7ecc2b2595bc..b5ed92fa39c8 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -978,6 +978,7 @@ where woken_block, woken_candidate, &subsystem.metrics, + &wakeups, ).await? } next_msg = ctx.recv().fuse() => { @@ -1152,6 +1153,7 @@ async fn handle_actions( candidate_hash, delayed_approvals_timers, approval_request, + &wakeups, )
.into_iter() @@ -1663,6 +1665,7 @@ async fn handle_from_overseer( |r| { let _ = res.send(r); }, + &wakeups, ) .await? .0, @@ -2477,6 +2480,7 @@ async fn check_and_import_approval( metrics: &Metrics, approval: IndirectSignedApprovalVoteV2, with_response: impl FnOnce(ApprovalCheckResult) -> T, + wakeups: &Wakeups, ) -> SubsystemResult<(Vec, T)> where Sender: SubsystemSender, @@ -2655,6 +2659,7 @@ where approved_candidate_hash, candidate_entry, ApprovalStateTransition::RemoteApproval(approval.validator), + wakeups, ) .await; actions.extend(new_actions); @@ -2689,6 +2694,10 @@ impl ApprovalStateTransition { ApprovalStateTransition::WakeupProcessed => false, } } + + fn is_remote_approval(&self) -> bool { + matches!(*self, ApprovalStateTransition::RemoteApproval(_)) + } } // Advance the approval state, either by importing an approval vote which is already checked to be @@ -2705,6 +2714,7 @@ async fn advance_approval_state( candidate_hash: CandidateHash, mut candidate_entry: CandidateEntry, transition: ApprovalStateTransition, + wakeups: &Wakeups, ) -> Vec where Sender: SubsystemSender, @@ -2835,6 +2845,43 @@ where status.required_tranches, )); + if is_approved && transition.is_remote_approval() { + // Make sure we wake other blocks in case they have + // a no-show that might be covered by this approval. + for (fork_block_hash, fork_approval_entry) in candidate_entry + .block_assignments + .iter() + .filter(|(hash, _)| **hash != block_hash) + { + let assigned_on_fork_block = validator_index + .as_ref() + .map(|validator_index| fork_approval_entry.is_assigned(*validator_index)) + .unwrap_or_default(); + if wakeups.wakeup_for(*fork_block_hash, candidate_hash).is_none() && + !fork_approval_entry.is_approved() && + assigned_on_fork_block + { + let fork_block_entry = db.load_block_entry(fork_block_hash); + if let Ok(Some(fork_block_entry)) = fork_block_entry { + actions.push(Action::ScheduleWakeup { + block_hash: *fork_block_hash, + block_number: fork_block_entry.block_number(), + candidate_hash, + // Schedule the wakeup next tick, since the assignment must be a + // no-show, because there is no-wakeup scheduled. 
+ tick: tick_now + 1, + }) + } else { + gum::debug!( + target: LOG_TARGET, + ?fork_block_entry, + ?fork_block_hash, + "Failed to load block entry" + ) + } + } + } + } // We have no need to write the candidate entry if all of the following // is true: // @@ -2896,6 +2943,7 @@ async fn process_wakeup( relay_block: Hash, candidate_hash: CandidateHash, metrics: &Metrics, + wakeups: &Wakeups, ) -> SubsystemResult> { let mut span = state .spans @@ -3064,6 +3112,7 @@ async fn process_wakeup( candidate_hash, candidate_entry, ApprovalStateTransition::WakeupProcessed, + wakeups, ) .await, ); @@ -3294,6 +3343,7 @@ async fn issue_approval( candidate_hash: CandidateHash, delayed_approvals_timers: &mut DelayedApprovalTimer, ApprovalVoteRequest { validator_index, block_hash }: ApprovalVoteRequest, + wakeups: &Wakeups, ) -> SubsystemResult> { let mut issue_approval_span = state .spans @@ -3415,6 +3465,7 @@ async fn issue_approval( candidate_hash, candidate_entry, ApprovalStateTransition::LocalApproval(validator_index as _), + wakeups, ) .await; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index f7bbbca4b8a1..312d805bbefb 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -834,7 +834,6 @@ impl ChainBuilder { cur_hash = cur_header.parent_hash; } ancestry.reverse(); - import_block( overseer, ancestry.as_ref(), @@ -1922,6 +1921,187 @@ fn subsystem_assignment_import_updates_candidate_entry_and_schedules_wakeup() { }); } +#[test] +fn subsystem_always_has_a_wakeup_when_pending() { + // Approvals sent after all assignments are no-show, the approval + // should be counted on the fork relay chain on the next tick. + test_approvals_on_fork_are_always_considered_after_no_show( + 30, + vec![(29, false), (30, false), (31, true)], + ); + // Approvals sent before fork no-shows, the approval + // should be counted on the fork relay chain when it no-shows. + test_approvals_on_fork_are_always_considered_after_no_show( + 8, // a tick smaller than the no-show tick which is 30. + vec![(7, false), (8, false), (29, false), (30, true), (31, true)], + ); +} + +fn test_approvals_on_fork_are_always_considered_after_no_show( + tick_to_send_approval: Tick, + expected_approval_status: Vec<(Tick, bool)>, +) { + let config = HarnessConfig::default(); + let store = config.backend(); + + test_harness(config, |test_harness| async move { + let TestHarness { + mut virtual_overseer, + clock, + sync_oracle_handle: _sync_oracle_handle, + .. 
+ } = test_harness; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(rx)) => { + rx.send(Ok(0)).unwrap(); + } + ); + let candidate_hash = Hash::repeat_byte(0x04); + + let candidate_descriptor = make_candidate(ParaId::from(1_u32), &candidate_hash); + let candidate_hash = candidate_descriptor.hash(); + + let block_hash = Hash::repeat_byte(0x01); + let block_hash_fork = Hash::repeat_byte(0x02); + + let candidate_index = 0; + let validator = ValidatorIndex(0); + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Eve, + ]; + // Add block hash 0x01 and for 0x02 + ChainBuilder::new() + .add_block( + block_hash, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![( + candidate_descriptor.clone(), + CoreIndex(0), + GroupIndex(0), + )]), + session_info: Some(SessionInfo { + validator_groups: IndexedVec::>::from( + vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ], + ), + needed_approvals: 1, + ..session_info(&validators) + }), + end_syncing: false, + }, + ) + .add_block( + block_hash_fork, + ChainBuilder::GENESIS_HASH, + 1, + BlockConfig { + slot: Slot::from(1), + candidates: Some(vec![(candidate_descriptor, CoreIndex(0), GroupIndex(0))]), + session_info: Some(SessionInfo { + validator_groups: IndexedVec::>::from( + vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2)], + vec![ValidatorIndex(3), ValidatorIndex(4)], + ], + ), + needed_approvals: 1, + ..session_info(&validators) + }), + end_syncing: false, + }, + ) + .build(&mut virtual_overseer) + .await; + + // Send assignments for the same candidate on both forks + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + ) + .await; + assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + + let rx = check_and_import_assignment( + &mut virtual_overseer, + block_hash_fork, + candidate_index, + validator, + ) + .await; + + assert_eq!(rx.await, Ok(AssignmentCheckResult::Accepted)); + // Wake on APPROVAL_DELAY first + assert!(clock.inner.lock().current_wakeup_is(2)); + clock.inner.lock().set_tick(2); + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Wake up on no-show + assert!(clock.inner.lock().current_wakeup_is(30)); + + for (tick, status) in expected_approval_status + .iter() + .filter(|(tick, _)| *tick < tick_to_send_approval) + { + // Wake up on no-show + clock.inner.lock().set_tick(*tick); + futures_timer::Delay::new(Duration::from_millis(100)).await; + let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap(); + let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap(); + assert!(!block_entry.is_fully_approved()); + assert_eq!(block_entry_fork.is_fully_approved(), *status); + } + + clock.inner.lock().set_tick(tick_to_send_approval); + futures_timer::Delay::new(Duration::from_millis(100)).await; + + // Send the approval for candidate just in the context of 0x01 block. + let rx = check_and_import_approval( + &mut virtual_overseer, + block_hash, + candidate_index, + validator, + candidate_hash, + 1, + false, + None, + ) + .await; + + assert_eq!(rx.await, Ok(ApprovalCheckResult::Accepted),); + + // Check approval status for the fork_block is correctly transitioned. 
+ for (tick, status) in expected_approval_status + .iter() + .filter(|(tick, _)| *tick >= tick_to_send_approval) + { + // Wake up on no-show + clock.inner.lock().set_tick(*tick); + futures_timer::Delay::new(Duration::from_millis(100)).await; + let block_entry = store.load_block_entry(&block_hash).unwrap().unwrap(); + let block_entry_fork = store.load_block_entry(&block_hash_fork).unwrap().unwrap(); + assert!(block_entry.is_fully_approved()); + assert_eq!(block_entry_fork.is_fully_approved(), *status); + } + + virtual_overseer + }); +} + #[test] fn subsystem_process_wakeup_schedules_wakeup() { test_harness(HarnessConfig::default(), |test_harness| async move { From c891fdabf4d519b25829490723fb70b1a2ffc0e5 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 18 Apr 2024 18:57:44 +0300 Subject: [PATCH 15/74] tx: Remove tx_broadcast transaction from the pool (#4050) This PR ensures that the broadcast future cleans up the submitted extrinsic from the pool, iff the `broadcast_stop` operation has been called. This effectively cleans up transactions from the pool when `broadcast_stop` is called. cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- .../tests/transaction_broadcast_tests.rs | 9 ++--- .../src/transaction/transaction_broadcast.rs | 40 +++++++++++++------ 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 77a28968aedf..14e188b6a873 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -94,7 +94,7 @@ async fn tx_broadcast_enters_pool() { #[tokio::test] async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, mut exec_middleware, _) = setup_api(Default::default()); + let (_, pool, _, tx_api, exec_middleware, _) = setup_api(Default::default()); // Invalid parameters. let err = tx_api @@ -114,13 +114,10 @@ async fn tx_broadcast_invalid_tx() { assert_eq!(0, pool.status().ready); - // Await the broadcast future to exit. - // Without this we'd be subject to races, where we try to call the stop before the tx is - // dropped. - let _ = get_next_event!(&mut exec_middleware.recv); + // The broadcast future should never be spawned when the tx decoding fails. assert_eq!(0, exec_middleware.num_tasks()); - // The broadcast future was dropped, and the operation is no longer active. + // The operation ID is no longer active. // When the operation is not active, either from the tx being finalized or a // terminal error; the stop method should return an error. let err = tx_api diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index 6eaf50d6b2e2..ef1a426865d5 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -37,7 +37,7 @@ use std::{collections::HashMap, sync::Arc}; use super::error::ErrorBroadcast; /// An API for transaction RPC calls. -pub struct TransactionBroadcast { +pub struct TransactionBroadcast { /// Substrate client. client: Arc, /// Transactions pool. pool: Arc, /// Executor to spawn subscriptions.
executor: SubscriptionTaskExecutor, /// The broadcast operation IDs. - broadcast_ids: Arc>>, + broadcast_ids: Arc>>>, } /// The state of a broadcast operation. -struct BroadcastState { +struct BroadcastState { /// Handle to abort the running future that broadcasts the transaction. handle: AbortHandle, + /// Associated tx hash. + tx_hash: ::Hash, } -impl TransactionBroadcast { +impl TransactionBroadcast { /// Creates a new [`TransactionBroadcast`]. pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { TransactionBroadcast { client, pool, executor, broadcast_ids: Default::default() } @@ -106,17 +108,22 @@ where // The unique ID of this operation. let id = self.generate_unique_id(); + // The JSON-RPC server might check whether the transaction is valid before broadcasting it. + // If it does so and if the transaction is invalid, the server should silently do nothing + // and the JSON-RPC client is not informed of the problem. Invalid transactions should still + // count towards the limit to the number of simultaneously broadcasted transactions. + let Ok(decoded_extrinsic) = TransactionFor::::decode(&mut &bytes[..]) else { + return Ok(Some(id)); + }; + // Save the tx hash to remove it later. + let tx_hash = pool.hash_of(&decoded_extrinsic); + let mut best_block_import_stream = Box::pin(self.client.import_notification_stream().filter_map( |notification| async move { notification.is_new_best.then_some(notification.hash) }, )); let broadcast_transaction_fut = async move { - // There is nothing we could do with an extrinsic of invalid format. - let Ok(decoded_extrinsic) = TransactionFor::::decode(&mut &bytes[..]) else { - return; - }; - // Flag to determine if the we should broadcast the transaction again. let mut is_done = false; @@ -169,17 +176,26 @@ where let (fut, handle) = futures::future::abortable(broadcast_transaction_fut); let broadcast_ids = self.broadcast_ids.clone(); let drop_id = id.clone(); + let pool = self.pool.clone(); // The future expected by the executor must be `Future` instead of // `Future>`. - let fut = fut.map(move |_| { + let fut = fut.map(move |result| { // Remove the entry from the broadcast IDs map. - broadcast_ids.write().remove(&drop_id); + let Some(broadcast_state) = broadcast_ids.write().remove(&drop_id) else { return }; + + // The broadcast was not stopped. + if result.is_ok() { + return + } + + // Best effort pool removal (tx can already be finalized). + pool.remove_invalid(&[broadcast_state.tx_hash]); }); // Keep track of this entry and the abortable handle. { let mut broadcast_ids = self.broadcast_ids.write(); - broadcast_ids.insert(id.clone(), BroadcastState { handle }); + broadcast_ids.insert(id.clone(), BroadcastState { handle, tx_hash }); } sc_rpc::utils::spawn_subscription_task(&self.executor, fut); From 88a2f360238787bf5256cfdd14b40c08f519b38e Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Thu, 18 Apr 2024 20:19:04 +0300 Subject: [PATCH 16/74] chainHead: Stabilize chainHead to version 1 (#4168) This PR stabilizes the chainHead API to version 1. 
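Illustration only, not part of the patch: after the rename, clients reach the same methods under the `v1` prefix instead of `unstable`. A minimal sketch of opening a follow subscription, assuming a local node serving the RPC interface on `ws://127.0.0.1:9944` and the third-party `websocat` tool:

```bash
# Sketch only: start a chainHead_v1_follow subscription (withRuntime = false).
# The node replies with the subscription id and then streams chainHead_v1_followEvent
# notifications, the first of which is the `initialized` event.
echo '{"id":1,"jsonrpc":"2.0","method":"chainHead_v1_follow","params":[false]}' \
  | websocat ws://127.0.0.1:9944
```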
Needs: - https://github.com/paritytech/polkadot-sdk/pull/3667 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- prdoc/pr_4168.prdoc | 8 + .../client/rpc-spec-v2/src/chain_head/api.rs | 26 +- .../src/chain_head/subscription/inner.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 225 ++++++++---------- 4 files changed, 118 insertions(+), 143 deletions(-) create mode 100644 prdoc/pr_4168.prdoc diff --git a/prdoc/pr_4168.prdoc b/prdoc/pr_4168.prdoc new file mode 100644 index 000000000000..9a498500f08b --- /dev/null +++ b/prdoc/pr_4168.prdoc @@ -0,0 +1,8 @@ +title: Stabilize chianHead RPC class to version 1 + +doc: + - audience: Node Dev + description: | + The chainHead RPC API is stabilized to version 1. + +crates: [ ] diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 3851adac2644..23cb0bbf5458 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -37,15 +37,15 @@ pub trait ChainHeadApi { /// /// This method is unstable and subject to change in the future. #[subscription( - name = "chainHead_unstable_follow" => "chainHead_unstable_followEvent", - unsubscribe = "chainHead_unstable_unfollow", + name = "chainHead_v1_follow" => "chainHead_v1_followEvent", + unsubscribe = "chainHead_v1_unfollow", item = FollowEvent, )] fn chain_head_unstable_follow(&self, with_runtime: bool); /// Retrieves the body (list of transactions) of a pinned block. /// - /// This method should be seen as a complement to `chainHead_unstable_follow`, + /// This method should be seen as a complement to `chainHead_v1_follow`, /// allowing the JSON-RPC client to retrieve more information about a block /// that has been reported. /// @@ -54,7 +54,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_body", raw_method)] + #[method(name = "chainHead_v1_body", raw_method)] async fn chain_head_unstable_body( &self, follow_subscription: String, @@ -63,7 +63,7 @@ pub trait ChainHeadApi { /// Retrieves the header of a pinned block. /// - /// This method should be seen as a complement to `chainHead_unstable_follow`, + /// This method should be seen as a complement to `chainHead_v1_follow`, /// allowing the JSON-RPC client to retrieve more information about a block /// that has been reported. /// @@ -73,7 +73,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_header", raw_method)] + #[method(name = "chainHead_v1_header", raw_method)] async fn chain_head_unstable_header( &self, follow_subscription: String, @@ -85,7 +85,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_storage", raw_method)] + #[method(name = "chainHead_v1_storage", raw_method)] async fn chain_head_unstable_storage( &self, follow_subscription: String, @@ -99,7 +99,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_call", raw_method)] + #[method(name = "chainHead_v1_call", raw_method)] async fn chain_head_unstable_call( &self, follow_subscription: String, @@ -118,7 +118,7 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. 
- #[method(name = "chainHead_unstable_unpin", raw_method)] + #[method(name = "chainHead_v1_unpin", raw_method)] async fn chain_head_unstable_unpin( &self, follow_subscription: String, @@ -131,21 +131,21 @@ pub trait ChainHeadApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_continue", raw_method)] + #[method(name = "chainHead_v1_continue", raw_method)] async fn chain_head_unstable_continue( &self, follow_subscription: String, operation_id: String, ) -> Result<(), Error>; - /// Stops an operation started with chainHead_unstable_body, chainHead_unstable_call, or - /// chainHead_unstable_storage. If the operation was still in progress, this interrupts it. If + /// Stops an operation started with chainHead_v1_body, chainHead_v1_call, or + /// chainHead_v1_storage. If the operation was still in progress, this interrupts it. If /// the operation was already finished, this call has no effect. /// /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_stopOperation", raw_method)] + #[method(name = "chainHead_v1_stopOperation", raw_method)] async fn chain_head_unstable_stop_operation( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 0e5ccb91d39a..3495d9e54490 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -186,7 +186,7 @@ impl OperationState { /// Stops the operation if `waitingForContinue` event was emitted for the associated /// operation ID. /// - /// Returns nothing in accordance with `chainHead_unstable_stopOperation`. + /// Returns nothing in accordance with `chainHead_v1_stopOperation`. pub fn stop_operation(&self) { // `waitingForContinue` not generated. if !self.shared_state.requested_continue.load(std::sync::atomic::Ordering::Acquire) { diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 14f664858a0d..4bab2194e082 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -156,7 +156,7 @@ async fn setup_api() -> ( ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -266,7 +266,7 @@ async fn follow_subscription_produces_blocks() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -337,7 +337,7 @@ async fn follow_with_runtime() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. 
let event: FollowEvent = get_next_event(&mut sub).await; @@ -448,14 +448,14 @@ async fn get_header() { // Invalid subscription ID must produce no results. let res: Option = api - .call("chainHead_unstable_header", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_header", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); assert!(res.is_none()); // Valid subscription with invalid block hash will error. let err = api - .call::<_, serde_json::Value>("chainHead_unstable_header", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>("chainHead_v1_header", [&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -463,7 +463,7 @@ async fn get_header() { ); // Obtain the valid header. - let res: String = api.call("chainHead_unstable_header", [&sub_id, &block_hash]).await.unwrap(); + let res: String = api.call("chainHead_v1_header", [&sub_id, &block_hash]).await.unwrap(); let bytes = array_bytes::hex2bytes(&res).unwrap(); let header: Header = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(header, block.header); @@ -476,15 +476,13 @@ async fn get_body() { let invalid_hash = hex_string(&INVALID_HASH); // Subscription ID is invalid. - let response: MethodResponse = api - .call("chainHead_unstable_body", ["invalid_sub_id", &invalid_hash]) - .await - .unwrap(); + let response: MethodResponse = + api.call("chainHead_v1_body", ["invalid_sub_id", &invalid_hash]).await.unwrap(); assert_matches!(response, MethodResponse::LimitReached); // Block hash is invalid. let err = api - .call::<_, serde_json::Value>("chainHead_unstable_body", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>("chainHead_v1_body", [&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -493,7 +491,7 @@ async fn get_body() { // Valid call. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -534,7 +532,7 @@ async fn get_body() { // Valid call to a block with extrinsics. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -556,10 +554,7 @@ async fn call_runtime() { // Subscription ID is invalid. let response: MethodResponse = api - .call( - "chainHead_unstable_call", - ["invalid_sub_id", &block_hash, "BabeApi_current_epoch", "0x00"], - ) + .call("chainHead_v1_call", ["invalid_sub_id", &block_hash, "BabeApi_current_epoch", "0x00"]) .await .unwrap(); assert_matches!(response, MethodResponse::LimitReached); @@ -567,7 +562,7 @@ async fn call_runtime() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &invalid_hash, "BabeApi_current_epoch", "0x00"], ) .await @@ -579,7 +574,7 @@ async fn call_runtime() { // Pass an invalid parameters that cannot be decode. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", // 0x0 is invalid. 
[&sub_id, &block_hash, "BabeApi_current_epoch", "0x0"], ) @@ -595,7 +590,7 @@ async fn call_runtime() { let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -614,7 +609,7 @@ async fn call_runtime() { // The `current_epoch` takes no parameters and not draining the input buffer // will cause the execution to fail. let response: MethodResponse = api - .call("chainHead_unstable_call", [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"]) + .call("chainHead_v1_call", [&sub_id, &block_hash, "BabeApi_current_epoch", "0x00"]) .await .unwrap(); let operation_id = match response { @@ -651,7 +646,7 @@ async fn call_runtime_without_flag() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -685,7 +680,7 @@ async fn call_runtime_without_flag() { let call_parameters = hex_string(&alice_id.encode()); let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -706,7 +701,7 @@ async fn get_storage_hash() { // Subscription ID is invalid. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ "invalid_sub_id", &invalid_hash, @@ -720,7 +715,7 @@ async fn get_storage_hash() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &invalid_hash, @@ -736,7 +731,7 @@ async fn get_storage_hash() { // Valid call without storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -779,7 +774,7 @@ async fn get_storage_hash() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -812,7 +807,7 @@ async fn get_storage_hash() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -869,7 +864,7 @@ async fn get_storage_multi_query_iter() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -920,7 +915,7 @@ async fn get_storage_multi_query_iter() { let expected_value = hex_string(&CHILD_VALUE); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -974,7 +969,7 @@ async fn get_storage_value() { // Subscription ID is invalid. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ "invalid_sub_id", &invalid_hash, @@ -988,7 +983,7 @@ async fn get_storage_value() { // Block hash is invalid. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &invalid_hash, @@ -1004,7 +999,7 @@ async fn get_storage_value() { // Valid call without storage at the key. 
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1047,7 +1042,7 @@ async fn get_storage_value() { // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1079,7 +1074,7 @@ async fn get_storage_value() { let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &genesis_hash, @@ -1121,7 +1116,7 @@ async fn get_storage_non_queryable_key() { let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1146,7 +1141,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1171,7 +1166,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1197,7 +1192,7 @@ async fn get_storage_non_queryable_key() { let prefixed_key = hex_string(&prefixed_key); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1227,9 +1222,9 @@ async fn unique_operation_ids() { // Ensure that operation IDs are unique for multiple method calls. for _ in 0..5 { - // Valid `chainHead_unstable_body` call. + // Valid `chainHead_v1_body` call. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id, &block_hash]).await.unwrap(); let operation_id = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -1241,11 +1236,11 @@ async fn unique_operation_ids() { // Ensure uniqueness. assert!(op_ids.insert(operation_id)); - // Valid `chainHead_unstable_storage` call. + // Valid `chainHead_v1_storage` call. let key = hex_string(&KEY); let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -1266,12 +1261,12 @@ async fn unique_operation_ids() { // Ensure uniqueness. assert!(op_ids.insert(operation_id)); - // Valid `chainHead_unstable_call` call. + // Valid `chainHead_v1_call` call. let alice_id = AccountKeyring::Alice.to_account_id(); let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -1313,12 +1308,11 @@ async fn separate_operation_ids_for_subscriptions() { .into_rpc(); // Create two separate subscriptions. 
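// (Operation IDs are scoped per follow subscription, so two subscriptions may
// both hand out the same ID, e.g. "0", without conflict.)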
- let mut sub_first = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub_first = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id_first = sub_first.subscription_id(); let sub_id_first = serde_json::to_string(&sub_id_first).unwrap(); - let mut sub_second = - api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub_second = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id_second = sub_second.subscription_id(); let sub_id_second = serde_json::to_string(&sub_id_second).unwrap(); @@ -1362,17 +1356,15 @@ async fn separate_operation_ids_for_subscriptions() { // Each `chainHead_follow` subscription receives a separate operation ID. let response: MethodResponse = - api.call("chainHead_unstable_body", [&sub_id_first, &block_hash]).await.unwrap(); + api.call("chainHead_v1_body", [&sub_id_first, &block_hash]).await.unwrap(); let operation_id: String = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), }; assert_eq!(operation_id, "0"); - let response: MethodResponse = api - .call("chainHead_unstable_body", [&sub_id_second, &block_hash]) - .await - .unwrap(); + let response: MethodResponse = + api.call("chainHead_v1_body", [&sub_id_second, &block_hash]).await.unwrap(); let operation_id_second: String = match response { MethodResponse::Started(started) => started.operation_id, MethodResponse::LimitReached => panic!("Expected started response"), @@ -1449,7 +1441,7 @@ async fn follow_generates_initial_blocks() { let block_2_f_hash = block_2_f.header.hash(); client.import(BlockOrigin::Own, block_2_f.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -1561,7 +1553,7 @@ async fn follow_exceeding_pinned_blocks() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -1640,7 +1632,7 @@ async fn follow_with_unpin() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1672,17 +1664,14 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. 
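// (Contrast with the call above: an unknown subscription ID is silently
// accepted, while an unknown block hash under a valid subscription is a hard
// error.)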
let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &invalid_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1690,10 +1679,7 @@ async fn follow_with_unpin() { ); // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &block_hash]).await.unwrap(); // Block tree: // finalized_block -> block -> block2 @@ -1754,7 +1740,7 @@ async fn unpin_duplicate_hashes() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1786,7 +1772,7 @@ async fn unpin_duplicate_hashes() { // Try to unpin duplicate hashes. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash]], ) .await @@ -1821,7 +1807,7 @@ async fn unpin_duplicate_hashes() { // Try to unpin duplicate hashes. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2, &block_hash]], ) .await @@ -1832,7 +1818,7 @@ async fn unpin_duplicate_hashes() { // Can unpin blocks. let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2]]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_hash, &block_hash_2]]) .await .unwrap(); } @@ -1859,7 +1845,7 @@ async fn follow_with_multiple_unpin_hashes() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -1930,16 +1916,13 @@ async fn follow_with_multiple_unpin_hashes() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &invalid_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &invalid_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1947,14 +1930,14 @@ async fn follow_with_multiple_unpin_hashes() { ); let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, &block_1_hash]) .await .unwrap(); // One block hash is invalid. Block 1 is already unpinned. let err = api .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", + "chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], ) .await @@ -1965,16 +1948,13 @@ async fn follow_with_multiple_unpin_hashes() { // Unpin multiple blocks. 
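// (`chainHead_v1_unpin` accepts a single hash or an array of hashes, and the
// batched form is all-or-nothing: the earlier call that included the already
// unpinned block 1 failed as a whole, leaving blocks 2 and 3 still pinned.)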
let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .call("chainHead_v1_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) .await .unwrap(); // Check block 2 and 3 are unpinned. let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &block_2_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &block_2_hash]) .await .unwrap_err(); assert_matches!(err, @@ -1982,10 +1962,7 @@ async fn follow_with_multiple_unpin_hashes() { ); let err = api - .call::<_, serde_json::Value>( - "chainHead_unstable_unpin", - rpc_params![&sub_id, &block_3_hash], - ) + .call::<_, serde_json::Value>("chainHead_v1_unpin", rpc_params![&sub_id, &block_3_hash]) .await .unwrap_err(); assert_matches!(err, @@ -2016,7 +1993,7 @@ async fn follow_prune_best_block() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2178,7 +2155,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -2282,7 +2259,7 @@ async fn follow_forks_pruned_block() { // Block 2_f and 3_f are not pruned, pruning happens at height (N - 1). client.finalize_block(block_3_hash, None).unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2444,7 +2421,7 @@ async fn follow_report_multiple_pruned_block() { let block_3_f = block_builder.build().unwrap().block; let block_3_f_hash = block_3_f.hash(); client.import(BlockOrigin::Own, block_3_f.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. let event: FollowEvent = get_next_event(&mut sub).await; @@ -2630,7 +2607,7 @@ async fn pin_block_references() { } } - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -2669,10 +2646,7 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api - .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &block_hash]).await.unwrap(); // Make sure unpin clears out the reference. 
let refs = backend.pin_refs(&hash).unwrap(); @@ -2765,7 +2739,7 @@ async fn follow_finalized_before_new_block() { let block_1_hash = block_1.header.hash(); client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Trigger the `FinalizedNotification` for block 1 before the `BlockImportNotification`, and // expect for the `chainHead` to generate `NewBlock`, `BestBlock` and `Finalized` events. @@ -2870,7 +2844,7 @@ async fn ensure_operation_limits_works() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -2909,7 +2883,7 @@ async fn ensure_operation_limits_works() { ]; let response: MethodResponse = api - .call("chainHead_unstable_storage", rpc_params![&sub_id, &block_hash, items]) + .call("chainHead_v1_storage", rpc_params![&sub_id, &block_hash, items]) .await .unwrap(); let operation_id = match response { @@ -2932,7 +2906,7 @@ async fn ensure_operation_limits_works() { let call_parameters = hex_string(&alice_id.encode()); let response: MethodResponse = api .call( - "chainHead_unstable_call", + "chainHead_v1_call", [&sub_id, &block_hash, "AccountNonceApi_account_nonce", &call_parameters], ) .await @@ -2977,7 +2951,7 @@ async fn check_continue_operation() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -3014,17 +2988,17 @@ async fn check_continue_operation() { // Invalid subscription ID must produce no results. let _res: () = api - .call("chainHead_unstable_continue", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_continue", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Invalid operation ID must produce no results. - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &invalid_hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &invalid_hash]).await.unwrap(); // Valid call with storage at the key. 
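// (This query is paginated: the server emits one `OperationStorageItems` batch,
// then pauses until `chainHead_v1_continue` is called with the same operation
// ID, as exercised repeatedly below.)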
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3060,7 +3034,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3079,7 +3053,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3099,7 +3073,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3118,7 +3092,7 @@ async fn check_continue_operation() { std::time::Duration::from_secs(DOES_NOT_PRODUCE_EVENTS_SECONDS), ) .await; - let _res: () = api.call("chainHead_unstable_continue", [&sub_id, &operation_id]).await.unwrap(); + let _res: () = api.call("chainHead_v1_continue", [&sub_id, &operation_id]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && @@ -3162,7 +3136,7 @@ async fn stop_storage_operation() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); @@ -3196,20 +3170,17 @@ async fn stop_storage_operation() { // Invalid subscription ID must produce no results. let _res: () = api - .call("chainHead_unstable_stopOperation", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_v1_stopOperation", ["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Invalid operation ID must produce no results. - let _res: () = api - .call("chainHead_unstable_stopOperation", [&sub_id, &invalid_hash]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_stopOperation", [&sub_id, &invalid_hash]).await.unwrap(); // Valid call with storage at the key. let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3241,10 +3212,7 @@ async fn stop_storage_operation() { ); // Stop the operation. - let _res: () = api - .call("chainHead_unstable_stopOperation", [&sub_id, &operation_id]) - .await - .unwrap(); + let _res: () = api.call("chainHead_v1_stopOperation", [&sub_id, &operation_id]).await.unwrap(); does_not_produce_event::>( &mut sub, @@ -3272,7 +3240,7 @@ async fn storage_closest_merkle_value() { // Valid call with storage at the keys. 
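// (Here the query items ask for the closest descendant merkle value of each
// key rather than the plain storage value.)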
let response: MethodResponse = api .call( - "chainHead_unstable_storage", + "chainHead_v1_storage", rpc_params![ &sub_id, &block_hash, @@ -3466,7 +3434,7 @@ async fn chain_head_stop_all_subscriptions() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Ensure the imported block is propagated and pinned for this subscription. assert_matches!( @@ -3500,8 +3468,7 @@ async fn chain_head_stop_all_subscriptions() { ); } - let mut second_sub = - api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut second_sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Lagging detected, the stop event is delivered immediately. assert_matches!( get_next_event::>(&mut second_sub).await, @@ -3512,14 +3479,14 @@ async fn chain_head_stop_all_subscriptions() { assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); // Other subscriptions cannot be started until the suspension period is over. - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Should receive the stop event immediately. assert_matches!(get_next_event::>(&mut sub).await, FollowEvent::Stop); // For the next subscription, lagging distance must be smaller. client.finalize_block(parent_hash, None).unwrap(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); assert_matches!( get_next_event::>(&mut sub).await, FollowEvent::Initialized(_) @@ -3681,12 +3648,12 @@ async fn chain_head_limit_reached() { ) .into_rpc(); - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; - let error = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap_err(); + let error = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap_err(); assert!(error .to_string() .contains("Maximum number of chainHead_follow has been reached")); @@ -3696,7 +3663,7 @@ async fn chain_head_limit_reached() { // Ensure the `chainHead_unfollow` is propagated to the server. tokio::time::sleep(std::time::Duration::from_secs(5)).await; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [true]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [true]).await.unwrap(); // Initialized must always be reported first. let _event: FollowEvent = get_next_event(&mut sub).await; } @@ -3723,7 +3690,7 @@ async fn follow_unique_pruned_blocks() { .into_rpc(); let finalized_hash = client.info().finalized_hash; - let mut sub = api.subscribe_unbounded("chainHead_unstable_follow", [false]).await.unwrap(); + let mut sub = api.subscribe_unbounded("chainHead_v1_follow", [false]).await.unwrap(); // Initialized must always be reported first. 
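// (Every `chainHead_v1_follow` subscription opens with an `Initialized` event
// for the current finalized block before any `NewBlock`/`BestBlock` events.)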
let event: FollowEvent = get_next_event(&mut sub).await; @@ -3827,7 +3794,7 @@ async fn follow_unique_pruned_blocks() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_v1_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); // Import block 7 and check it. let block_7_hash = import_block(client.clone(), block_6_hash, 3).await.hash(); From 98a364fe6e7abf10819f5fddd3de0588f7c38700 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 19 Apr 2024 07:34:26 +0300 Subject: [PATCH 17/74] rpc-v2: Limit transactionBroadcast calls to 16 (#3772) This PR limits the number of active calls to the transactionBroadcast APIs to 16. cc @paritytech/subxt-team Closes: https://github.com/paritytech/polkadot-sdk/issues/3081 --------- Signed-off-by: Alexandru Vasile Co-authored-by: James Wilson --- .../client/rpc-spec-v2/src/transaction/api.rs | 8 +-- .../src/transaction/tests/setup.rs | 11 ++- .../tests/transaction_broadcast_tests.rs | 68 +++++++++++++++++-- .../src/transaction/transaction.rs | 1 + .../src/transaction/transaction_broadcast.rs | 63 +++++++++++++++-- substrate/client/service/src/builder.rs | 3 + 6 files changed, 133 insertions(+), 21 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index 33af9c953338..119bf270c63a 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -47,14 +47,14 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_broadcast")] - fn broadcast(&self, bytes: Bytes) -> RpcResult>; + #[method(name = "transaction_unstable_broadcast", raw_method)] + async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. /// /// # Unstable /// /// This method is unstable and subject to change in the future. 
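/// (Both methods are declared as `raw_method` so the server hands the caller's
/// `ConnectionDetails` to the handler, which is what lets the implementation
/// enforce the per-connection limit introduced in this patch.)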
- #[method(name = "transaction_unstable_stop")] - fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; + #[method(name = "transaction_unstable_stop", raw_method)] + async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs index 4a15657a7f69..570174a3db64 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/setup.rs @@ -67,6 +67,7 @@ fn maintained_pool( pub fn setup_api( options: Options, + max_tx_per_connection: usize, ) -> ( Arc, Arc, @@ -85,9 +86,13 @@ pub fn setup_api( let (task_executor, executor_recv) = TaskExecutorBroadcast::new(); - let tx_api = - RpcTransactionBroadcast::new(client_mock.clone(), pool.clone(), Arc::new(task_executor)) - .into_rpc(); + let tx_api = RpcTransactionBroadcast::new( + client_mock.clone(), + pool.clone(), + Arc::new(task_executor), + max_tx_per_connection, + ) + .into_rpc(); (api, pool, client_mock, tx_api, executor_recv, pool_state) } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index 14e188b6a873..f4a69bd6ed47 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -26,6 +26,8 @@ use std::sync::Arc; use substrate_test_runtime_client::AccountKeyring::*; use substrate_test_runtime_transaction_pool::uxt; +const MAX_TX_PER_CONNECTION: usize = 4; + // Test helpers. use crate::transaction::tests::{ middleware_pool::{MiddlewarePoolEvent, TxStatusTypeTest}, @@ -35,7 +37,7 @@ use crate::transaction::tests::{ #[tokio::test] async fn tx_broadcast_enters_pool() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. let block_1_header = api.push_block(1, vec![], true); @@ -94,7 +96,8 @@ async fn tx_broadcast_enters_pool() { #[tokio::test] async fn tx_broadcast_invalid_tx() { - let (_, pool, _, tx_api, exec_middleware, _) = setup_api(Default::default()); + let (_, pool, _, tx_api, exec_middleware, _) = + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Invalid parameters. let err = tx_api @@ -131,7 +134,7 @@ async fn tx_broadcast_invalid_tx() { #[tokio::test] async fn tx_stop_with_invalid_operation_id() { - let (_, _, _, tx_api, _, _) = setup_api(Default::default()); + let (_, _, _, tx_api, _, _) = setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Make an invalid stop call. let err = tx_api @@ -146,7 +149,7 @@ async fn tx_stop_with_invalid_operation_id() { #[tokio::test] async fn tx_broadcast_resubmits_future_nonce_tx() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. let block_1_header = api.push_block(1, vec![], true); @@ -237,7 +240,7 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { #[tokio::test] async fn tx_broadcast_stop_after_broadcast_finishes() { let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(Default::default()); + setup_api(Default::default(), MAX_TX_PER_CONNECTION); // Start at block 1. 
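// (push_block advances the mocked chain; the broadcast future submits to the
// pool only after a block-import notification, hence the trigger_import_stream
// calls in these tests.)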
let block_1_header = api.push_block(1, vec![], true); @@ -320,7 +323,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { }; let (api, pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = - setup_api(options); + setup_api(options, MAX_TX_PER_CONNECTION); let uxt = uxt(Alice, ALICE_NONCE); let xt = hex_string(&uxt.encode()); @@ -439,7 +442,8 @@ async fn tx_broadcast_resubmits_dropped_tx() { ban_time: std::time::Duration::ZERO, }; - let (api, pool, client_mock, tx_api, _, mut pool_middleware) = setup_api(options); + let (api, pool, client_mock, tx_api, _, mut pool_middleware) = + setup_api(options, MAX_TX_PER_CONNECTION); let current_uxt = uxt(Alice, ALICE_NONCE); let current_xt = hex_string(¤t_uxt.encode()); @@ -518,3 +522,53 @@ async fn tx_broadcast_resubmits_dropped_tx() { // The dropped transaction was resubmitted. assert_eq!(events.get(&future_xt).unwrap(), &vec![TxStatusTypeTest::Ready]); } + +#[tokio::test] +async fn tx_broadcast_limit_reached() { + // One operation per connection. + let (api, _pool, client_mock, tx_api, mut exec_middleware, mut pool_middleware) = + setup_api(Default::default(), 1); + + // Start at block 1. + let block_1_header = api.push_block(1, vec![], true); + let uxt = uxt(Alice, ALICE_NONCE); + let xt = hex_string(&uxt.encode()); + + let operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + + // Announce block 1 to `transaction_unstable_broadcast`. + client_mock.trigger_import_stream(block_1_header).await; + + // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + let event = get_next_event!(&mut pool_middleware); + assert_eq!( + event, + MiddlewarePoolEvent::TransactionStatus { + transaction: xt.clone(), + status: TxStatusTypeTest::Ready + } + ); + assert_eq!(1, exec_middleware.num_tasks()); + + let operation_id_limit_reached: Option = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + assert!(operation_id_limit_reached.is_none(), "No operation ID => tx was rejected"); + + // We still have in flight one operation. + assert_eq!(1, exec_middleware.num_tasks()); + + // Force the future to exit by calling stop. + let _: () = tx_api + .call("transaction_unstable_stop", rpc_params![&operation_id]) + .await + .unwrap(); + + // Ensure the broadcast future finishes. + let _ = get_next_event!(&mut exec_middleware.recv); + assert_eq!(0, exec_middleware.num_tasks()); + + // Can resubmit again now. + let _operation_id: String = + tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); +} diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs index 6a7c69b8f7d1..723440d1b111 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction.rs @@ -26,6 +26,7 @@ use crate::{ }, SubscriptionTaskExecutor, }; + use codec::Decode; use futures::{StreamExt, TryFutureExt}; use jsonrpsee::{core::async_trait, PendingSubscriptionSink}; diff --git a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs index ef1a426865d5..68c19010e31c 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/transaction_broadcast.rs @@ -18,11 +18,17 @@ //! API implementation for broadcasting transactions. 
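//!
//! Broadcast operations follow a reserve → register → release lifecycle: a
//! connection must reserve one of its `max_transactions_per_connection` slots
//! before a transaction is submitted, and dropping the reservation frees the
//! slot again. A sketch of that flow (illustration only; the names mirror the
//! implementation below):
//!
//! ```ignore
//! // Take one of the connection's slots, refusing the call if none are left.
//! let Some(reserved) = rpc_connections.reserve_space(connection_id) else { return Ok(None) };
//! // Tie the generated operation ID to the reserved slot.
//! let Some(slot) = reserved.register(operation_id) else { return Ok(None) };
//! // ... drive the broadcast future ...
//! drop(slot); // releases the slot when the broadcast ends or is stopped
//! ```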
-use crate::{transaction::api::TransactionBroadcastApiServer, SubscriptionTaskExecutor}; +use crate::{ + common::connections::RpcConnections, transaction::api::TransactionBroadcastApiServer, + SubscriptionTaskExecutor, +}; use codec::Decode; use futures::{FutureExt, Stream, StreamExt}; use futures_util::stream::AbortHandle; -use jsonrpsee::core::{async_trait, RpcResult}; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + ConnectionDetails, +}; use parking_lot::RwLock; use rand::{distributions::Alphanumeric, Rng}; use sc_client_api::BlockchainEvents; @@ -46,6 +52,8 @@ pub struct TransactionBroadcast { executor: SubscriptionTaskExecutor, /// The broadcast operation IDs. broadcast_ids: Arc>>>, + /// Keep track of how many concurrent operations are active for each connection. + rpc_connections: RpcConnections, } /// The state of a broadcast operation. @@ -58,8 +66,19 @@ struct BroadcastState { impl TransactionBroadcast { /// Creates a new [`TransactionBroadcast`]. - pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { - TransactionBroadcast { client, pool, executor, broadcast_ids: Default::default() } + pub fn new( + client: Arc, + pool: Arc, + executor: SubscriptionTaskExecutor, + max_transactions_per_connection: usize, + ) -> Self { + TransactionBroadcast { + client, + pool, + executor, + broadcast_ids: Default::default(), + rpc_connections: RpcConnections::new(max_transactions_per_connection), + } } /// Generate an unique operation ID for the `transaction_broadcast` RPC method. @@ -102,12 +121,26 @@ where ::Hash: Unpin, Client: HeaderBackend + BlockchainEvents + Send + Sync + 'static, { - fn broadcast(&self, bytes: Bytes) -> RpcResult> { + async fn broadcast( + &self, + connection_details: ConnectionDetails, + bytes: Bytes, + ) -> RpcResult> { let pool = self.pool.clone(); // The unique ID of this operation. let id = self.generate_unique_id(); + // Ensure that the connection has not reached the maximum number of active operations. + let Some(reserved_connection) = self.rpc_connections.reserve_space(connection_details.id()) + else { + return Ok(None) + }; + let Some(reserved_identifier) = reserved_connection.register(id.clone()) else { + // This can only happen if the generated operation ID is not unique. + return Ok(None) + }; + // The JSON-RPC server might check whether the transaction is valid before broadcasting it. // If it does so and if the transaction is invalid, the server should silently do nothing // and the JSON-RPC client is not informed of the problem. Invalid transactions should still @@ -118,7 +151,11 @@ where // Save the tx hash to remove it later. let tx_hash = pool.hash_of(&decoded_extrinsic); - let mut best_block_import_stream = + // The compiler can no longer deduce the type of the stream and complains + // about `one type is more general than the other`. + let mut best_block_import_stream: std::pin::Pin< + Box::Hash> + Send>, + > = Box::pin(self.client.import_notification_stream().filter_map( |notification| async move { notification.is_new_best.then_some(notification.hash) }, )); @@ -180,6 +217,9 @@ where // The future expected by the executor must be `Future` instead of // `Future>`. let fut = fut.map(move |result| { + // Connection space is cleaned when this object is dropped. + drop(reserved_identifier); + // Remove the entry from the broadcast IDs map. 
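// (The per-connection slot was already freed by dropping `reserved_identifier`
// above; what remains is only the broadcast bookkeeping itself.)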
let Some(broadcast_state) = broadcast_ids.write().remove(&drop_id) else { return }; @@ -203,7 +243,16 @@ where Ok(Some(id)) } - fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast> { + async fn stop_broadcast( + &self, + connection_details: ConnectionDetails, + operation_id: String, + ) -> Result<(), ErrorBroadcast> { + // The operation ID must correlate to the same connection ID. + if !self.rpc_connections.contains_identifier(connection_details.id(), &operation_id) { + return Err(ErrorBroadcast::InvalidOperationID) + } + let mut broadcast_ids = self.broadcast_ids.write(); let Some(broadcast_state) = broadcast_ids.remove(&operation_id) else { diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 830f9884719d..d0d7cba38624 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -644,10 +644,13 @@ where (chain, state, child_state) }; + const MAX_TRANSACTION_PER_CONNECTION: usize = 16; + let transaction_broadcast_rpc_v2 = sc_rpc_spec_v2::transaction::TransactionBroadcast::new( client.clone(), transaction_pool.clone(), task_executor.clone(), + MAX_TRANSACTION_PER_CONNECTION, ) .into_rpc(); From 4f125d1928648c53db377d0ccc68fa2d705ed9ea Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Fri, 19 Apr 2024 09:35:56 +0200 Subject: [PATCH 18/74] Update subsystem-benchmark params (#4201) - Restored latency (results are more stable with it) - Weakened the threshold - Increased the number of runs --- .../availability-distribution-regression-bench.rs | 14 ++++++-------- .../availability-recovery-regression-bench.rs | 10 ++++------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs index 0d4f4f49e31f..5e3072be3a8c 100644 --- a/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs +++ b/polkadot/node/network/availability-distribution/benches/availability-distribution-regression-bench.rs @@ -31,7 +31,7 @@ use polkadot_subsystem_bench::{ }; use std::io::Write; -const BENCH_COUNT: usize = 5; +const BENCH_COUNT: usize = 50; fn main() -> Result<(), String> { let mut messages = vec![]; @@ -40,8 +40,6 @@ fn main() -> Result<(), String> { config.n_cores = 10; config.n_validators = 500; config.num_blocks = 3; - config.connectivity = 100; - config.latency = None; config.generate_pov_sizes(); let state = TestState::new(&config); @@ -75,13 +73,13 @@ fn main() -> Result<(), String> { // We expect no variance for received and sent // but use 0.001 because we operate with floats messages.extend(average_usage.check_network_usage(&[ - ("Received from peers", 433.3, 0.001), - ("Sent to peers", 18480.0, 0.001), + ("Received from peers", 433.3333, 0.001), + ("Sent to peers", 18479.9000, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("availability-distribution", 0.012, 0.05), - ("availability-store", 0.153, 0.05), - ("bitfield-distribution", 0.026, 0.05), + ("availability-distribution", 0.0123, 0.1), + ("availability-store", 0.1597, 0.1), + ("bitfield-distribution", 0.0223, 0.1), ])); if messages.is_empty() { diff --git a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs index
9be147bda93a..d9bdc1a2d944 100644 --- a/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs +++ b/polkadot/node/network/availability-recovery/benches/availability-recovery-regression-bench.rs @@ -32,7 +32,7 @@ use polkadot_subsystem_bench::{ }; use std::io::Write; -const BENCH_COUNT: usize = 5; +const BENCH_COUNT: usize = 10; fn main() -> Result<(), String> { let mut messages = vec![]; let options = DataAvailabilityReadOptions { fetch_from_backers: true }; let mut config = TestConfiguration::default(); config.num_blocks = 3; - config.connectivity = 100; - config.latency = None; config.generate_pov_sizes(); let state = TestState::new(&config); @@ -73,10 +71,10 @@ fn main() -> Result<(), String> { // We expect no variance for received and sent // but use 0.001 because we operate with floats messages.extend(average_usage.check_network_usage(&[ - ("Received from peers", 307200.000, 0.001), - ("Sent to peers", 1.667, 0.001), + ("Received from peers", 307203.0000, 0.001), + ("Sent to peers", 1.6667, 0.001), ])); - messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 11.500, 0.05)])); + messages.extend(average_usage.check_cpu_usage(&[("availability-recovery", 12.8338, 0.1)])); if messages.is_empty() { Ok(()) From 04a9071e2a5ba903648f8db19066e671659850fb Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Fri, 19 Apr 2024 11:15:59 +0300 Subject: [PATCH 19/74] Use higher priority for PVF preparation in dispute/approval context (#4172) Related to the discussion in https://github.com/paritytech/polkadot-sdk/issues/4126. Currently all preparations have the same priority, which is not ideal in all cases. This change should improve the finality time in the context of on-demand parachains, and when `ExecutorParams` are updated on-chain and a rebuild of all artifacts is required. The desired effect is to speed up approval and dispute PVF executions that require preparation, and to delay backing executions that require preparation. --------- Signed-off-by: Andrei Sandu --- .../node/core/candidate-validation/src/lib.rs | 40 ++++++++++++++----- .../core/candidate-validation/src/tests.rs | 2 + polkadot/node/core/pvf/src/host.rs | 2 +- polkadot/node/core/pvf/src/priority.rs | 7 ++-- 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index ec24434db24c..8663dc43835a 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -657,7 +657,14 @@ async fn validate_candidate_exhaustive( PrepareJobKind::Compilation, ); - validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await + validation_backend + .validate_candidate( + pvf, + exec_timeout, + params.encode(), + polkadot_node_core_pvf::Priority::Normal, + ) + .await }, PvfExecKind::Approval => validation_backend @@ -667,6 +674,7 @@ async fn validate_candidate_exhaustive( params, executor_params, PVF_APPROVAL_EXECUTION_RETRY_DELAY, + polkadot_node_core_pvf::Priority::Critical, ) .await, }; @@ -749,10 +757,15 @@ trait ValidationBackend { pvf: PvfPrepData, exec_timeout: Duration, encoded_params: Vec, + // The priority for the preparation job. + prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result; - /// Tries executing a PVF for the approval subsystem.
Will retry once if an error is encountered - /// that may have been transient. + /// Tries executing a PVF. Will retry once if an error is encountered that may have + /// been transient. + /// + /// The `prepare_priority` is relevant in the context of the caller. Currently we expect + /// that `approval` context has priority over `backing` context. /// /// NOTE: Should retry only on errors that are a result of execution itself, and not of /// preparation. @@ -763,6 +776,8 @@ trait ValidationBackend { params: ValidationParams, executor_params: ExecutorParams, retry_delay: Duration, + // The priority for the preparation job. + prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. @@ -776,8 +791,10 @@ trait ValidationBackend { // long. let total_time_start = Instant::now(); - let mut validation_result = - self.validate_candidate(pvf.clone(), exec_timeout, params.encode()).await; + // Use `Priority::Critical` as finality trumps parachain liveliness. + let mut validation_result = self + .validate_candidate(pvf.clone(), exec_timeout, params.encode(), prepare_priority) + .await; if validation_result.is_ok() { return validation_result } @@ -851,8 +868,9 @@ trait ValidationBackend { // Encode the params again when re-trying. We expect the retry case to be relatively // rare, and we want to avoid unconditionally cloning data. - validation_result = - self.validate_candidate(pvf.clone(), new_timeout, params.encode()).await; + validation_result = self + .validate_candidate(pvf.clone(), new_timeout, params.encode(), prepare_priority) + .await; } } @@ -870,11 +888,13 @@ impl ValidationBackend for ValidationHost { pvf: PvfPrepData, exec_timeout: Duration, encoded_params: Vec, + // The priority for the preparation job. + prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { - let priority = polkadot_node_core_pvf::Priority::Normal; - let (tx, rx) = oneshot::channel(); - if let Err(err) = self.execute_pvf(pvf, exec_timeout, encoded_params, priority, tx).await { + if let Err(err) = + self.execute_pvf(pvf, exec_timeout, encoded_params, prepare_priority, tx).await + { return Err(InternalValidationError::HostCommunication(format!( "cannot send pvf to the validation host, it might have shut down: {:?}", err diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index f646f8535495..e492d51e239e 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -368,6 +368,7 @@ impl ValidationBackend for MockValidateCandidateBackend { _pvf: PvfPrepData, _timeout: Duration, _encoded_params: Vec, + _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { // This is expected to panic if called more times than expected, indicating an error in the // test. 
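// (The mocks accept the new `_prepare_priority` argument but ignore it; the
// priority only affects how the real PVF host queues preparation jobs.)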
@@ -1044,6 +1045,7 @@ impl ValidationBackend for MockPreCheckBackend { _pvf: PvfPrepData, _timeout: Duration, _encoded_params: Vec, + _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { unreachable!() } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 59d5a7e20a88..247d753d7c44 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -197,7 +197,7 @@ impl Config { prepare_worker_program_path, prepare_worker_spawn_timeout: Duration::from_secs(3), prepare_workers_soft_max_num: 1, - prepare_workers_hard_max_num: 1, + prepare_workers_hard_max_num: 2, execute_worker_program_path, execute_worker_spawn_timeout: Duration::from_secs(3), diff --git a/polkadot/node/core/pvf/src/priority.rs b/polkadot/node/core/pvf/src/priority.rs index d1ef9c604b11..0d18d4b484ca 100644 --- a/polkadot/node/core/pvf/src/priority.rs +++ b/polkadot/node/core/pvf/src/priority.rs @@ -14,17 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -/// A priority assigned to execution of a PVF. +/// A priority assigned to preparation of a PVF. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum Priority { /// Normal priority for things that do not require immediate response, but still need to be /// done pretty quick. /// - /// Approvals and disputes fall into this category. + /// Backing falls into this category. Normal, /// This priority is used for requests that are required to be processed as soon as possible. /// - /// For example, backing is on a critical path and requires execution as soon as possible. + /// Disputes and approvals are on a critical path and require execution as soon as + /// possible to not delay finality. Critical, } From 21308d893ef0594538aee73cbdc3905189be0b7b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 19 Apr 2024 11:34:46 +0300 Subject: [PATCH 20/74] Fixed GrandpaConsensusLogReader::find_scheduled_change (#4208) --- bridges/primitives/header-chain/src/lib.rs | 60 +++++++++++++++++----- 1 file changed, 46 insertions(+), 14 deletions(-) diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs index 98fb9ff83d83..ad496012c6a3 100644 --- a/bridges/primitives/header-chain/src/lib.rs +++ b/bridges/primitives/header-chain/src/lib.rs @@ -32,7 +32,9 @@ use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; use frame_support::PalletError; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; -use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; +use sp_consensus_grandpa::{ + AuthorityList, ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID, +}; use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug}; use sp_std::{boxed::Box, vec::Vec}; @@ -147,24 +149,23 @@ pub struct GrandpaConsensusLogReader(sp_std::marker::PhantomData impl GrandpaConsensusLogReader { /// Find and return scheduled (regular) change digest item. - pub fn find_scheduled_change( - digest: &Digest, - ) -> Option> { + pub fn find_scheduled_change(digest: &Digest) -> Option> { + use sp_runtime::generic::OpaqueDigestItemId; + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + // find the first consensus digest with the right ID which converts to // the right kind of consensus log. 
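// (Previously the code below used `convert_first` with `consensus_try_to`,
// which stops at the first GRANDPA digest item of any variant and only then
// checks for `ScheduledChange`; a scheduled change preceded by another GRANDPA
// log such as `OnDisabled` was therefore missed. Moving the filter into the
// scan fixes this, as the new `find_scheduled_change_works` test demonstrates.)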
- digest - .convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID)) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }) + digest.convert_first(|l| l.try_to(id).and_then(filter_log)) } /// Find and return forced change digest item. Or light client can't do anything /// with forced changes, so we can't accept header with the forced change digest. - pub fn find_forced_change( - digest: &Digest, - ) -> Option<(Number, sp_consensus_grandpa::ScheduledChange)> { + pub fn find_forced_change(digest: &Digest) -> Option<(Number, ScheduledChange)> { // find the first consensus digest with the right ID which converts to // the right kind of consensus log. digest @@ -346,7 +347,7 @@ mod tests { use super::*; use bp_runtime::ChainId; use frame_support::weights::Weight; - use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature}; + use sp_runtime::{testing::H256, traits::BlakeTwo256, DigestItem, MultiSignature}; struct TestChain; @@ -385,4 +386,35 @@ mod tests { max_expected_submit_finality_proof_arguments_size::(false, 100), ); } + + #[test] + fn find_scheduled_change_works() { + let scheduled_change = ScheduledChange { next_authorities: vec![], delay: 0 }; + + // first + let mut digest = Digest::default(); + digest.push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(), + )); + assert_eq!( + GrandpaConsensusLogReader::find_scheduled_change(&digest), + Some(scheduled_change.clone()) + ); + + // not first + let mut digest = Digest::default(); + digest.push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + ConsensusLog::::OnDisabled(0).encode(), + )); + digest.push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + ConsensusLog::ScheduledChange(scheduled_change.clone()).encode(), + )); + assert_eq!( + GrandpaConsensusLogReader::find_scheduled_change(&digest), + Some(scheduled_change.clone()) + ); + } } From 69f4373178f33d702a7e02e71358eb826877d6f8 Mon Sep 17 00:00:00 2001 From: Bulat Saifullin Date: Fri, 19 Apr 2024 12:04:03 +0300 Subject: [PATCH 21/74] Provide WSS bootnodes for Rococo and Westend parachains (#4161) Some Rococo parachains lacked WS nodes. `ws/wss` endpoints are necessary for using light clients on the testnet. Changes: 1. Add `ws/wss` endpoints to all testnet parachains. 2. Remove decommissioned nodes (`people-collator-node-2` `people-collator-node-3` ). 
--- cumulus/parachains/chain-specs/asset-hub-rococo.json | 6 +++++- .../parachains/chain-specs/asset-hub-westend.json | 4 ++++ .../parachains/chain-specs/bridge-hub-rococo.json | 6 +++++- .../parachains/chain-specs/bridge-hub-westend.json | 4 ++++ .../parachains/chain-specs/collectives-westend.json | 2 ++ cumulus/parachains/chain-specs/contracts-rococo.json | 2 ++ cumulus/parachains/chain-specs/coretime-rococo.json | 8 ++++++-- cumulus/parachains/chain-specs/coretime-westend.json | 4 ++++ cumulus/parachains/chain-specs/people-rococo.json | 12 +++++------- cumulus/parachains/chain-specs/people-westend.json | 4 +++- 10 files changed, 40 insertions(+), 12 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-rococo.json b/cumulus/parachains/chain-specs/asset-hub-rococo.json index 900d9f0ffb2c..87ff2fb220a1 100644 --- a/cumulus/parachains/chain-specs/asset-hub-rococo.json +++ b/cumulus/parachains/chain-specs/asset-hub-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", - "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS" + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS", + "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS", + "/dns/rococo-asset-hub-bootnode-0.polkadot.io/tcp/443/wss/p2p/12D3KooWRrZMndHAopzao34uGsN7srjS3gh9nAjTGKLSyJeU31Lg", + "/dns/rococo-asset-hub-bootnode-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAewimoNJqMaiiV5pYiowA5hLuh5JS5QiRJCCyWVrrSTS" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index 670935c9d247..3752213e702e 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30333/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30333/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", + "/dns/westend-asset-hub-bootnode-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", + "/dns/westend-asset-hub-bootnode-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", + "/dns/westend-asset-hub-connect-0.polkadot.io/tcp/443/wss/p2p/12D3KooWJaAfPyiye7ZQBuHengTJJoMrcaz7Jj1UzHiKdNxA1Nkd", + "/dns/westend-asset-hub-connect-1.polkadot.io/tcp/443/wss/p2p/12D3KooWGL3hpWycWyeqyL9gHNnmmsL474WkPZdqraBHu4L6fQrW", "/dns/boot.stake.plus/tcp/33333/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.stake.plus/tcp/33334/wss/p2p/12D3KooWNiB27rpXX7EYongoWWUeRKzLQxWGms6MQU2B9LX7Ztzo", "/dns/boot.metaspan.io/tcp/36052/p2p/12D3KooWBCqfNb6Y39DXTr4UBWXyjuS3hcZM1qTbHhDXxF6HkAJJ", diff --git a/cumulus/parachains/chain-specs/bridge-hub-rococo.json b/cumulus/parachains/chain-specs/bridge-hub-rococo.json index 6b430678a86c..53aef58422db 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-rococo.json +++ b/cumulus/parachains/chain-specs/bridge-hub-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ 
"/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", - "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ" + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ", + "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ", + "/dns/rococo-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJCFBJmFF65xz5xHeZQRSCf35BxfSEB3RHQFoLza28LWU", + "/dns/rococo-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWJzLd8skcAgA24EcJey7aJAhYctfUxWGjSP5Usk9wbpPZ" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 447207a58107..5140071ec44c 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", + "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", + "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", + "/dns/westend-bridge-hub-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWKyEuqkkWvFSrwZWKWBAsHgLV3HGfHj7yH3LNJLAVhmxY", + "/dns/westend-bridge-hub-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBpvudthz61XC4oP2YYFFJdhWohBeQ1ffn1BMSGWhapjd", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/30338/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/westend-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWJWWRYTAwBLqYkh7iMBGDr5ouJ3MHj7M3fZ7zWS4zEk6F", "/dns/boot-cr.gatotech.network/tcp/33330/p2p/12D3KooWJHG6qznPzTSEbuujHNcvyzBZcR9zNRPFcXWUaoVWZBEw", diff --git a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index e459c631f8be..fdd6348f02a9 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -5,6 +5,8 @@ "bootNodes": [ "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30334/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", + "/dns/westend-collectives-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", + "/dns/westend-collectives-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", "/dns/westend-collectives-collator-0.polkadot.io/tcp/443/wss/p2p/12D3KooWBMAuyzQu3yAf8YXyoyxsSzSsgoaqAepgnNyQcPaPjPXe", 
"/dns/westend-collectives-collator-1.polkadot.io/tcp/443/wss/p2p/12D3KooWAujYtHbCs4MiDD57JNTntTJnYnikfnaPa7JdnMyAUrHB", "/dns/boot.stake.plus/tcp/38333/p2p/12D3KooWQoVsFCfgu21iu6kdtQsU9T6dPn1wsyLn1U34yPerR6zQ", diff --git a/cumulus/parachains/chain-specs/contracts-rococo.json b/cumulus/parachains/chain-specs/contracts-rococo.json index 422268a5efdb..71783481e5cc 100644 --- a/cumulus/parachains/chain-specs/contracts-rococo.json +++ b/cumulus/parachains/chain-specs/contracts-rococo.json @@ -5,6 +5,8 @@ "bootNodes": [ "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", + "/dns/rococo-contracts-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", + "/dns/rococo-contracts-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh", "/dns/rococo-contracts-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWKg3Rpxcr9oJ8n6khoxpGKWztCZydtUZk2cojHqnfLrpj", "/dns/rococo-contracts-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPEXYrz8tHU3nDtPoPw4V7ou5dzMEWSTuUj7vaWiYVAVh" ], diff --git a/cumulus/parachains/chain-specs/coretime-rococo.json b/cumulus/parachains/chain-specs/coretime-rococo.json index 39506095bfe0..082e7dd26a95 100644 --- a/cumulus/parachains/chain-specs/coretime-rococo.json +++ b/cumulus/parachains/chain-specs/coretime-rococo.json @@ -4,7 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30333/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", - "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30333/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX", + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/30335/ws/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/30335/ws/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX", + "/dns/rococo-coretime-collator-node-0.polkadot.io/tcp/443/wss/p2p/12D3KooWHBUH9wGBx1Yq1ZePov9VL3AzxRPv5DTR4KadiCU6VKxy", + "/dns/rococo-coretime-collator-node-1.polkadot.io/tcp/443/wss/p2p/12D3KooWB3SKxdj6kpwTkdMnHJi6YmadojCzmEqFkeFJjxN812XX" ], "telemetryEndpoints": null, "protocolId": null, @@ -67,4 +71,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/coretime-westend.json b/cumulus/parachains/chain-specs/coretime-westend.json index 8f096fa6a962..85e129e68489 100644 --- a/cumulus/parachains/chain-specs/coretime-westend.json +++ b/cumulus/parachains/chain-specs/coretime-westend.json @@ -5,6 +5,10 @@ "bootNodes": [ "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + "/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", + 
"/dns/westend-coretime-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWP93Dzk8T7GWxyWw9jhLcz8Pksokk3R9vL2eEH337bNkT", + "/dns/westend-coretime-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWMh2imeAzsZKGQgm2cv6Uoep3GBYtwGfujt1bs5YfVzkH", "/dns/boot.metaspan.io/tcp/33019/p2p/12D3KooWCa1uNnEZqiqJY9jkKNQxwSLGPeZ5MjWHhjQMGwga9JMM", "/dns/boot-node.helikon.io/tcp/9420/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", "/dns/boot-node.helikon.io/tcp/9422/wss/p2p/12D3KooWFBPartM873MNm1AmVK3etUz34cAE9A9rwPztPno2epQ3", diff --git a/cumulus/parachains/chain-specs/people-rococo.json b/cumulus/parachains/chain-specs/people-rococo.json index b28191571521..a4361b77df79 100644 --- a/cumulus/parachains/chain-specs/people-rococo.json +++ b/cumulus/parachains/chain-specs/people-rococo.json @@ -4,13 +4,11 @@ "chainType": "Live", "bootNodes": [ "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", - "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", - "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", - "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT", - "/dns/rococo-people-collator-node-2.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWBvA9BmBfrsVMcAcqVXGYFCpMTvkSk2igNXpmoareYbeT", - "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG", - "/dns/rococo-people-collator-node-3.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWQ7Q9jLcJTPXy7KEp5hSZ8YMY9pHx9CnQVz3T8TKQ81UG" + "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", + "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H", + "/dns/rococo-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDZg5jMYhKXTu6RU491V5sxsFnP4oaEmZJEUfcRkYzps5", + "/dns/rococo-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWGGR5i6qQqfo7iDNp7vjDRKPWuDk53idGV6nFLwS12X5H" ], "telemetryEndpoints": null, "protocolId": null, @@ -79,4 +77,4 @@ "childrenDefault": {} } } -} \ No newline at end of file +} diff --git a/cumulus/parachains/chain-specs/people-westend.json b/cumulus/parachains/chain-specs/people-westend.json index 6dd8579cf257..93b8c064113f 100644 --- a/cumulus/parachains/chain-specs/people-westend.json +++ b/cumulus/parachains/chain-specs/people-westend.json @@ -4,8 +4,10 @@ "chainType": "Live", "bootNodes": [ "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", - "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/30333/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", + "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", + 
"/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/30335/ws/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", + "/dns/westend-people-collator-node-0.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWDcLjDLTu9fNhmas9DTWtqdv8eUbFMWQzVwvXRK7QcjHD", "/dns/westend-people-collator-node-1.parity-testnet.parity.io/tcp/443/wss/p2p/12D3KooWM56JbKWAXsDyWh313z73aKYVMp1Hj2nSnAKY3q6MnoC9", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30532/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", "/dns/identity-westend.bootnodes.polkadotters.com/tcp/30534/wss/p2p/12D3KooWKr9San6KTM7REJ95cBaDoiciGcWnW8TTftEJgxGF5Ehb", From 148d942ec0f1de2f8ea9e524428000c8886c678c Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Fri, 19 Apr 2024 12:28:48 +0300 Subject: [PATCH 22/74] txBroadcast: Stabilize to version 1 (#4169) This PR stabilizes the txBroadcast API to version 1. Ideally needs: - https://github.com/paritytech/polkadot-sdk/pull/4050 - https://github.com/paritytech/polkadot-sdk/pull/3772 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- prdoc/pr_4169.prdoc | 8 ++ .../client/rpc-spec-v2/src/transaction/api.rs | 5 +- .../tests/transaction_broadcast_tests.rs | 84 ++++++++----------- 3 files changed, 46 insertions(+), 51 deletions(-) create mode 100644 prdoc/pr_4169.prdoc diff --git a/prdoc/pr_4169.prdoc b/prdoc/pr_4169.prdoc new file mode 100644 index 000000000000..03f2f6e597e4 --- /dev/null +++ b/prdoc/pr_4169.prdoc @@ -0,0 +1,8 @@ +title: Stabilize transactionBroadcast RPC class to version 1 + +doc: + - audience: Node Dev + description: | + The transactionBroadcast RPC API is stabilized to version 1. + +crates: [ ] diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index 119bf270c63a..c4a892190589 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -47,7 +47,8 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_broadcast", raw_method)] + + #[method(name = "transaction_v1_broadcast", raw_method)] async fn broadcast(&self, bytes: Bytes) -> RpcResult>; /// Broadcast an extrinsic to the chain. @@ -55,6 +56,6 @@ pub trait TransactionBroadcastApi { /// # Unstable /// /// This method is unstable and subject to change in the future. - #[method(name = "transaction_unstable_stop", raw_method)] + #[method(name = "transaction_v1_stop", raw_method)] async fn stop_broadcast(&self, operation_id: String) -> Result<(), ErrorBroadcast>; } diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs index f4a69bd6ed47..efb3bd94ddbf 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_broadcast_tests.rs @@ -46,12 +46,12 @@ async fn tx_broadcast_enters_pool() { let xt = hex_string(&uxt.encode()); let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. 
client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -84,10 +84,7 @@ async fn tx_broadcast_enters_pool() { // The future broadcast awaits for the finalized status to be reached. // Force the future to exit by calling stop. - let _: () = tx_api - .call("transaction_unstable_stop", rpc_params![&operation_id]) - .await - .unwrap(); + let _: () = tx_api.call("transaction_v1_stop", rpc_params![&operation_id]).await.unwrap(); // Ensure the broadcast future finishes. let _ = get_next_event!(&mut exec_middleware.recv); @@ -101,7 +98,7 @@ async fn tx_broadcast_invalid_tx() { // Invalid parameters. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_broadcast", [1u8]) + .call::<_, serde_json::Value>("transaction_v1_broadcast", [1u8]) .await .unwrap_err(); assert_matches!(err, @@ -113,7 +110,7 @@ async fn tx_broadcast_invalid_tx() { // Invalid transaction that cannot be decoded. The broadcast silently exits. let xt = "0xdeadbeef"; let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); assert_eq!(0, pool.status().ready); @@ -124,7 +121,7 @@ async fn tx_broadcast_invalid_tx() { // When the operation is not active, either from the tx being finalized or a // terminal error; the stop method should return an error. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .call::<_, serde_json::Value>("transaction_v1_stop", rpc_params![&operation_id]) .await .unwrap_err(); assert_matches!(err, @@ -138,7 +135,7 @@ async fn tx_stop_with_invalid_operation_id() { // Make an invalid stop call. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", ["invalid_operation_id"]) + .call::<_, serde_json::Value>("transaction_v1_stop", ["invalid_operation_id"]) .await .unwrap_err(); assert_matches!(err, @@ -161,15 +158,13 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { let future_uxt = uxt(Alice, ALICE_NONCE + 1); let future_xt = hex_string(&future_uxt.encode()); - let future_operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![&future_xt]) - .await - .unwrap(); + let future_operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![&future_xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -188,13 +183,11 @@ async fn tx_broadcast_resubmits_future_nonce_tx() { let block_2_header = api.push_block(2, vec![], true); let block_2 = block_2_header.hash(); - let operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) - .await - .unwrap(); + let operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![¤t_xt]).await.unwrap(); assert_ne!(future_operation_id, operation_id); - // Announce block 2 to `transaction_unstable_broadcast`. 
+ // Announce block 2 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_2_header).await; // Collect the events of both transactions. @@ -249,12 +242,12 @@ async fn tx_broadcast_stop_after_broadcast_finishes() { let xt = hex_string(&uxt.encode()); let operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction // pool.inner_pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( @@ -303,7 +296,7 @@ async fn tx_broadcast_stop_after_broadcast_finishes() { // The operation ID is no longer valid, check that the broadcast future // cleared out the inner state of the operation. let err = tx_api - .call::<_, serde_json::Value>("transaction_unstable_stop", rpc_params![&operation_id]) + .call::<_, serde_json::Value>("transaction_v1_stop", rpc_params![&operation_id]) .await .unwrap_err(); assert_matches!(err, @@ -328,14 +321,14 @@ async fn tx_broadcast_resubmits_invalid_tx() { let uxt = uxt(Alice, ALICE_NONCE); let xt = hex_string(&uxt.encode()); let _operation_id: String = - tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap(); + tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap(); let block_1_header = api.push_block(1, vec![], true); let block_1 = block_1_header.hash(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_1_header).await; - // Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool. + // Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool. let event = get_next_event!(&mut pool_middleware); assert_eq!( event, @@ -355,7 +348,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { pool.inner_pool.maintain(event).await; assert_eq!(1, pool.inner_pool.status().ready); - // Ensure the `transaction_unstable_broadcast` is aware of the invalid transaction. + // Ensure the `transaction_v1_broadcast` is aware of the invalid transaction. let event = get_next_event!(&mut pool_middleware); // Because we have received an `Invalid` status, we try to broadcast the transaction with the // next announced block. @@ -388,7 +381,7 @@ async fn tx_broadcast_resubmits_invalid_tx() { pool.inner_pool.maintain(event).await; assert_eq!(0, pool.inner_pool.status().ready); - // Announce block to `transaction_unstable_broadcast`. + // Announce block to `transaction_v1_broadcast`. client_mock.trigger_import_stream(block_3_header).await; let event = get_next_event!(&mut pool_middleware); @@ -456,12 +449,10 @@ async fn tx_broadcast_resubmits_dropped_tx() { // are immediately dropped. api.set_priority(¤t_uxt, 10); - let current_operation_id: String = tx_api - .call("transaction_unstable_broadcast", rpc_params![¤t_xt]) - .await - .unwrap(); + let current_operation_id: String = + tx_api.call("transaction_v1_broadcast", rpc_params![¤t_xt]).await.unwrap(); - // Announce block 1 to `transaction_unstable_broadcast`. + // Announce block 1 to `transaction_v1_broadcast`. 
+	// Announce block 1 to `transaction_v1_broadcast`.
 	let block_1_header = api.push_block(1, vec![], true);
 	let event =
 		ChainEvent::Finalized { hash: block_1_header.hash(), tree_route: Arc::from(vec![]) };
@@ -480,10 +471,8 @@
 	// The future tx has priority 2, smaller than the current 10.
 	api.set_priority(&future_uxt, 2);

-	let future_operation_id: String = tx_api
-		.call("transaction_unstable_broadcast", rpc_params![&future_xt])
-		.await
-		.unwrap();
+	let future_operation_id: String =
+		tx_api.call("transaction_v1_broadcast", rpc_params![&future_xt]).await.unwrap();
 	assert_ne!(current_operation_id, future_operation_id);

 	let block_2_header = api.push_block(2, vec![], true);
@@ -535,12 +524,12 @@ async fn tx_broadcast_limit_reached() {
 	let xt = hex_string(&uxt.encode());

 	let operation_id: String =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();

-	// Announce block 1 to `transaction_unstable_broadcast`.
+	// Announce block 1 to `transaction_v1_broadcast`.
 	client_mock.trigger_import_stream(block_1_header).await;

-	// Ensure the tx propagated from `transaction_unstable_broadcast` to the transaction pool.
+	// Ensure the tx propagated from `transaction_v1_broadcast` to the transaction pool.
 	let event = get_next_event!(&mut pool_middleware);
 	assert_eq!(
 		event,
@@ -552,17 +541,14 @@
 	assert_eq!(1, exec_middleware.num_tasks());

 	let operation_id_limit_reached: Option<String> =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();
 	assert!(operation_id_limit_reached.is_none(), "No operation ID => tx was rejected");

 	// We still have in flight one operation.
 	assert_eq!(1, exec_middleware.num_tasks());

 	// Force the future to exit by calling stop.
-	let _: () = tx_api
-		.call("transaction_unstable_stop", rpc_params![&operation_id])
-		.await
-		.unwrap();
+	let _: () = tx_api.call("transaction_v1_stop", rpc_params![&operation_id]).await.unwrap();

 	// Ensure the broadcast future finishes.
 	let _ = get_next_event!(&mut exec_middleware.recv);
@@ -570,5 +556,5 @@

 	// Can resubmit again now.
 	let _operation_id: String =
-		tx_api.call("transaction_unstable_broadcast", rpc_params![&xt]).await.unwrap();
+		tx_api.call("transaction_v1_broadcast", rpc_params![&xt]).await.unwrap();
 }

From eba3deca3e61855c237a33013e8a5e82c479e958 Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Fri, 19 Apr 2024 12:48:44 +0300
Subject: [PATCH 23/74] txWatch: Stabilize txWatch to version 1 (#4171)

This PR stabilizes the transactionWatch API to version 1.
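
As an illustration (not part of the upstream patch): a minimal sketch of driving the stabilized subscription from a client, assuming the `jsonrpsee` WebSocket client and a node exposing the RPC at `ws://127.0.0.1:9944`; the endpoint, crate choice, and transaction bytes are assumptions made for the example only.

```
// Illustrative sketch only; assumes the `jsonrpsee`, `tokio` and `serde_json`
// crates. The endpoint and the transaction bytes below are placeholders.
use jsonrpsee::{
	core::client::{Subscription, SubscriptionClientT},
	rpc_params,
	ws_client::WsClientBuilder,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
	let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
	// Hypothetical SCALE-encoded, hex-prefixed extrinsic.
	let tx = "0xdeadbeef";
	// Previously `transactionWatch_unstable_submitAndWatch` / `_unwatch`.
	let mut sub: Subscription<serde_json::Value> = client
		.subscribe(
			"transactionWatch_v1_submitAndWatch",
			rpc_params![tx],
			"transactionWatch_v1_unwatch",
		)
		.await?;
	// Each notification arrives as a `transactionWatch_v1_watchEvent`
	// describing the transaction's progress.
	while let Some(event) = sub.next().await {
		println!("{:?}", event?);
	}
	Ok(())
}
```

Relative to the unstable API, only the method identifiers change (`transactionWatch_unstable_*` becomes `transactionWatch_v1_*`); the event payloads are untouched by this patch.
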
Needs from spec: - https://github.com/paritytech/json-rpc-interface-spec/pull/153 - https://github.com/paritytech/json-rpc-interface-spec/pull/154 cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- prdoc/pr_4171.prdoc | 8 ++++++++ substrate/client/rpc-spec-v2/src/transaction/api.rs | 4 ++-- .../src/transaction/tests/transaction_tests.rs | 6 +++--- 3 files changed, 13 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_4171.prdoc diff --git a/prdoc/pr_4171.prdoc b/prdoc/pr_4171.prdoc new file mode 100644 index 000000000000..eef45ba922c5 --- /dev/null +++ b/prdoc/pr_4171.prdoc @@ -0,0 +1,8 @@ +title: Stabilize transactionWatch RPC class to version 1 + +doc: + - audience: Node Dev + description: | + The transactionWatch RPC API is stabilized to version 1. + +crates: [ ] diff --git a/substrate/client/rpc-spec-v2/src/transaction/api.rs b/substrate/client/rpc-spec-v2/src/transaction/api.rs index c4a892190589..ed358922d53e 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/api.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/api.rs @@ -33,8 +33,8 @@ pub trait TransactionApi { /// /// This method is unstable and subject to change in the future. #[subscription( - name = "transactionWatch_unstable_submitAndWatch" => "transactionWatch_unstable_watchEvent", - unsubscribe = "transactionWatch_unstable_unwatch", + name = "transactionWatch_v1_submitAndWatch" => "transactionWatch_v1_watchEvent", + unsubscribe = "transactionWatch_v1_unwatch", item = TransactionEvent, )] fn submit_and_watch(&self, bytes: Bytes); diff --git a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs index c83bc948c437..7ce85b9feafe 100644 --- a/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs +++ b/substrate/client/rpc-spec-v2/src/transaction/tests/transaction_tests.rs @@ -38,7 +38,7 @@ async fn tx_invalid_bytes() { // This should not rely on the tx pool state. 
 	let mut sub = tx_api
-		.subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&"0xdeadbeef"])
+		.subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&"0xdeadbeef"])
 		.await
 		.unwrap();

@@ -56,7 +56,7 @@ async fn tx_in_finalized() {
 	let xt = hex_string(&uxt.encode());

 	let mut sub = tx_api
-		.subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&xt])
+		.subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&xt])
 		.await
 		.unwrap();

@@ -95,7 +95,7 @@ async fn tx_with_pruned_best_block() {
 	let xt = hex_string(&uxt.encode());

 	let mut sub = tx_api
-		.subscribe_unbounded("transactionWatch_unstable_submitAndWatch", rpc_params![&xt])
+		.subscribe_unbounded("transactionWatch_v1_submitAndWatch", rpc_params![&xt])
 		.await
 		.unwrap();

From 4eabe5e0dddc4cd31ad9dab5645350360d4d36a5 Mon Sep 17 00:00:00 2001
From: maksimryndin
Date: Fri, 19 Apr 2024 15:36:36 +0200
Subject: [PATCH 24/74] Pvf refactor execute worker errors follow up (#4071)

follow up of https://github.com/paritytech/polkadot-sdk/pull/2604
closes https://github.com/paritytech/polkadot-sdk/pull/2604

- [x] take relevant changes from Marcin's PR
- [x] extract common duplicate code for workers (low-hanging fruit)

~Some failing CI checks are more general and should be fixed in master (see
https://github.com/paritytech/polkadot-sdk/pull/4074)~

Proposed labels: **T0-node**, **R0-silent**, **I4-refactor**

-----

kusama address: FZXVQLqLbFV2otNXs6BMnNch54CFJ1idpWwjMb3Z8fTLQC6

---------

Co-authored-by: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com>
---
 Cargo.lock                                    |   6 +-
 polkadot/node/core/pvf/Cargo.toml             |  22 ++-
 polkadot/node/core/pvf/common/Cargo.toml      |   7 +-
 polkadot/node/core/pvf/common/src/error.rs    |   3 +
 polkadot/node/core/pvf/common/src/execute.rs  |  36 ++--
 polkadot/node/core/pvf/common/src/lib.rs      |   1 +
 polkadot/node/core/pvf/common/src/pvf.rs      |   7 +-
 .../node/core/pvf/common/src/worker/mod.rs    |  84 +++++++-
 .../node/core/pvf/execute-worker/src/lib.rs   | 180 +++++++++---------
 .../node/core/pvf/prepare-worker/src/lib.rs   |  51 +----
 polkadot/node/core/pvf/src/execute/queue.rs   |  55 ++++--
 .../core/pvf/src/execute/worker_interface.rs  | 163 ++++++++--------
 polkadot/node/core/pvf/src/host.rs            |   5 +-
 13 files changed, 333 insertions(+), 287 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7bf5215b6dec..951f2548d34d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -7575,9 +7575,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"

 [[package]]
 name = "libc"
-version = "0.2.152"
+version = "0.2.153"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7"
+checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"

 [[package]]
 name = "libflate"
@@ -13303,7 +13303,6 @@ dependencies = [
 "slotmap",
 "sp-core",
 "sp-maybe-compressed-blob",
- "sp-wasm-interface 20.0.0",
 "tempfile",
 "test-parachain-adder",
 "test-parachain-halt",
@@ -13340,7 +13339,6 @@ name = "polkadot-node-core-pvf-common"
 version = "7.0.0"
 dependencies = [
 "assert_matches",
- "cfg-if",
 "cpu-time",
 "futures",
 "landlock",

diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml
index a0233d6b7517..8bfe2baa42fd 100644
--- a/polkadot/node/core/pvf/Cargo.toml
+++ b/polkadot/node/core/pvf/Cargo.toml
@@ -17,8 +17,7 @@ cfg-if = "1.0"
 futures = "0.3.30"
 futures-timer = "3.0.2"
 gum = { package = "tracing-gum", path = "../../gum" }
-is_executable = "1.0.1"
-libc = "0.2.152" +is_executable = { version = "1.0.1", optional = true } pin-project = "1.0.9" rand = "0.8.5" slotmap = "1.0" @@ -26,7 +25,9 @@ tempfile = "3.3.0" thiserror = { workspace = true } tokio = { version = "1.24.2", features = ["fs", "process"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = [ + "derive", +] } polkadot-parachain-primitives = { path = "../../../parachain" } polkadot-core-primitives = { path = "../../../core-primitives" } @@ -37,14 +38,16 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } -sp-wasm-interface = { path = "../../../../substrate/primitives/wasm-interface" } -sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob" } +sp-maybe-compressed-blob = { path = "../../../../substrate/primitives/maybe-compressed-blob", optional = true } polkadot-node-core-pvf-prepare-worker = { path = "prepare-worker", optional = true } polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = true } [dev-dependencies] assert_matches = "1.4.0" -criterion = { version = "0.4.0", default-features = false, features = ["async_tokio", "cargo_bench_support"] } +criterion = { version = "0.4.0", default-features = false, features = [ + "async_tokio", + "cargo_bench_support", +] } hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } @@ -57,6 +60,7 @@ adder = { package = "test-parachain-adder", path = "../../../parachain/test-para halt = { package = "test-parachain-halt", path = "../../../parachain/test-parachains/halt" } [target.'cfg(target_os = "linux")'.dev-dependencies] +libc = "0.2.153" procfs = "0.16.0" rusty-fork = "0.3.0" sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } @@ -70,6 +74,8 @@ ci-only-tests = [] jemalloc-allocator = ["polkadot-node-core-pvf-common/jemalloc-allocator"] # This feature is used to export test code to other crates without putting it in the production build. 
test-utils = [ - "polkadot-node-core-pvf-execute-worker", - "polkadot-node-core-pvf-prepare-worker", + "dep:is_executable", + "dep:polkadot-node-core-pvf-execute-worker", + "dep:polkadot-node-core-pvf-prepare-worker", + "dep:sp-maybe-compressed-blob", ] diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index f3eb9d919aae..e1ce6e79cb99 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -10,14 +10,16 @@ license.workspace = true workspace = true [dependencies] -cfg-if = "1.0" cpu-time = "1.0.0" futures = "0.3.30" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.152" +nix = { version = "0.27.1", features = ["resource", "sched"] } thiserror = { workspace = true } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = [ + "derive", +] } polkadot-parachain-primitives = { path = "../../../../parachain" } polkadot-primitives = { path = "../../../../primitives" } @@ -34,7 +36,6 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" -nix = { version = "0.27.1", features = ["sched"] } [target.'cfg(all(target_os = "linux", target_arch = "x86_64"))'.dependencies] seccompiler = "0.4.0" diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index cf274044456f..adeb40c0b195 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -136,6 +136,9 @@ pub enum InternalValidationError { /// Could not find or open compiled artifact file. #[error("validation: could not find or open compiled artifact file: {0}")] CouldNotOpenFile(String), + /// Could not create a pipe between the worker and a child process. + #[error("validation: could not create pipe: {0}")] + CouldNotCreatePipe(String), /// Host could not clear the worker cache after a job. #[error("validation: host could not clear the worker cache ({path:?}) after a job: {err}")] CouldNotClearWorkerDir { diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 18c97b03cbcd..ae6096cacec4 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -30,35 +30,36 @@ pub struct Handshake { /// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum WorkerResponse { - /// The job completed successfully. - Ok { - /// The result of parachain validation. - result_descriptor: ValidationResult, - /// The amount of CPU time taken by the job. - duration: Duration, - }, - /// The candidate is invalid. - InvalidCandidate(String), - /// Instantiation of the WASM module instance failed during an execution. - /// Possibly related to local issues or dirty node update. May be retried with re-preparation. - RuntimeConstruction(String), +pub struct WorkerResponse { + /// The response from the execute job process. + pub job_response: JobResponse, + /// The amount of CPU time taken by the job. + pub duration: Duration, +} + +/// An error occurred in the worker process. +#[derive(thiserror::Error, Debug, Clone, Encode, Decode)] +pub enum WorkerError { /// The job timed out. + #[error("The job timed out")] JobTimedOut, /// The job process has died. We must kill the worker just in case. 
/// /// We cannot treat this as an internal error because malicious code may have killed the job. /// We still retry it, because in the non-malicious case it is likely spurious. + #[error("The job process (pid {job_pid}) has died: {err}")] JobDied { err: String, job_pid: i32 }, /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, /// etc. /// /// Because malicious code can cause a job error, we must not treat it as an internal error. We /// still retry it, because in the non-malicious case it is likely spurious. - JobError(String), + #[error("An unexpected error occurred in the job process: {0}")] + JobError(#[from] JobError), /// Some internal error occurred. - InternalError(InternalValidationError), + #[error("An internal error occurred: {0}")] + InternalError(#[from] InternalValidationError), } /// The result of a job on the execution worker. @@ -101,7 +102,7 @@ impl JobResponse { /// An unexpected error occurred in the execution job process. Because this comes from the job, /// which executes untrusted code, this error must likewise be treated as untrusted. That is, we /// cannot raise an internal error based on this. -#[derive(thiserror::Error, Debug, Encode, Decode)] +#[derive(thiserror::Error, Clone, Debug, Encode, Decode)] pub enum JobError { #[error("The job timed out")] TimedOut, @@ -114,4 +115,7 @@ pub enum JobError { CouldNotSpawnThread(String), #[error("An error occurred in the CPU time monitor thread: {0}")] CpuTimeMonitorThread(String), + /// Since the job can return any exit status it wants, we have to treat this as untrusted. + #[error("Unexpected exit status: {0}")] + UnexpectedExitStatus(i32), } diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 15097dbd3af5..0cd928201639 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -15,6 +15,7 @@ // along with Polkadot. If not, see . //! Contains functionality related to PVFs that is shared by the PVF host and the PVF workers. +#![deny(unused_crate_dependencies)] pub mod error; pub mod execute; diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 340dffe07c3f..5f248f49b9a3 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -18,12 +18,7 @@ use crate::prepare::PrepareJobKind; use parity_scale_codec::{Decode, Encode}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParams; -use std::{ - cmp::{Eq, PartialEq}, - fmt, - sync::Arc, - time::Duration, -}; +use std::{fmt, sync::Arc, time::Duration}; /// A struct that carries the exhaustive set of data to prepare an artifact out of plain /// Wasm binary diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index d7c95d9e7047..67e7bece407d 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -18,10 +18,13 @@ pub mod security; -use crate::{framed_recv_blocking, SecurityStatus, WorkerHandshake, LOG_TARGET}; +use crate::{ + framed_recv_blocking, framed_send_blocking, SecurityStatus, WorkerHandshake, LOG_TARGET, +}; use cpu_time::ProcessTime; use futures::never::Never; -use parity_scale_codec::Decode; +use nix::{errno::Errno, sys::resource::Usage}; +use parity_scale_codec::{Decode, Encode}; use std::{ any::Any, fmt::{self}, @@ -58,8 +61,6 @@ macro_rules! 
decl_worker_main { $crate::sp_tracing::try_init_simple(); - let worker_pid = std::process::id(); - let args = std::env::args().collect::>(); if args.len() == 1 { print_help($expected_command); @@ -548,6 +549,81 @@ fn recv_worker_handshake(stream: &mut UnixStream) -> io::Result Ok(worker_handshake) } +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +pub fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +pub fn recv_child_response( + received_data: &mut io::BufReader<&[u8]>, + context: &'static str, +) -> io::Result +where + T: Decode, +{ + let response_bytes = framed_recv_blocking(received_data)?; + T::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("{} pvf recv_child_response: decode error: {}", context, e), + ) + }) +} + +pub fn send_result( + stream: &mut UnixStream, + result: Result, + worker_info: &WorkerInfo, +) -> io::Result<()> +where + T: std::fmt::Debug, + E: std::fmt::Debug + std::fmt::Display, + Result: Encode, +{ + if let Err(ref err) = result { + gum::warn!( + target: LOG_TARGET, + ?worker_info, + "worker: error occurred: {}", + err + ); + } + gum::trace!( + target: LOG_TARGET, + ?worker_info, + "worker: sending result to host: {:?}", + result + ); + + framed_send_blocking(stream, &result.encode()).map_err(|err| { + gum::warn!( + target: LOG_TARGET, + ?worker_info, + "worker: error occurred sending result to host: {}", + err + ); + err + }) +} + +pub fn stringify_errno(context: &'static str, errno: Errno) -> String { + format!("{}: {}: {}", context, errno, io::Error::last_os_error()) +} + /// Functionality related to threads spawned by the workers. /// /// The motivation for this module is to coordinate worker threads without using async Rust. diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index bd7e76010a6d..55f5290bd87e 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -16,6 +16,9 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. 
+#![deny(unused_crate_dependencies)] +#![warn(missing_docs)] + pub use polkadot_node_core_pvf_common::{ error::ExecuteError, executor_interface::execute_artifact, }; @@ -36,11 +39,12 @@ use nix::{ use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse}, + execute::{Handshake, JobError, JobResponse, JobResult, WorkerError, WorkerResponse}, executor_interface::params_to_wasmtime_semantics, framed_recv_blocking, framed_send_blocking, worker::{ - cpu_time_monitor_loop, pipe2_cloexec, run_worker, stringify_panic_payload, + cpu_time_monitor_loop, get_total_cpu_usage, pipe2_cloexec, recv_child_response, run_worker, + send_result, stringify_errno, stringify_panic_payload, thread::{self, WaitOutcome}, PipeFd, WorkerInfo, WorkerKind, }, @@ -93,8 +97,14 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { Ok((params, execution_timeout)) } -fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> { - framed_send_blocking(stream, &response.encode()) +/// Sends an error to the host and returns the original error wrapped in `io::Error`. +macro_rules! map_and_send_err { + ($error:expr, $err_constructor:expr, $stream:expr, $worker_info:expr) => {{ + let err: WorkerError = $err_constructor($error.to_string()).into(); + let io_err = io::Error::new(io::ErrorKind::Other, err.to_string()); + let _ = send_result::($stream, Err(err), $worker_info); + io_err + }}; } /// The entrypoint that the spawned execute worker should start with. @@ -110,8 +120,6 @@ fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Resul /// check is not necessary. /// /// - `worker_version`: see above -/// -/// - `security_status`: contains the detected status of security features. pub fn worker_entrypoint( socket_path: PathBuf, worker_dir_path: PathBuf, @@ -127,13 +135,28 @@ pub fn worker_entrypoint( |mut stream, worker_info, security_status| { let artifact_path = worker_dir::execute_artifact(&worker_info.worker_dir_path); - let Handshake { executor_params } = recv_execute_handshake(&mut stream)?; + let Handshake { executor_params } = + recv_execute_handshake(&mut stream).map_err(|e| { + map_and_send_err!( + e, + InternalValidationError::HostCommunication, + &mut stream, + worker_info + ) + })?; let executor_params: Arc = Arc::new(executor_params); let execute_thread_stack_size = max_stack_size(&executor_params); loop { - let (params, execution_timeout) = recv_request(&mut stream)?; + let (params, execution_timeout) = recv_request(&mut stream).map_err(|e| { + map_and_send_err!( + e, + InternalValidationError::HostCommunication, + &mut stream, + worker_info + ) + })?; gum::debug!( target: LOG_TARGET, ?worker_info, @@ -143,27 +166,34 @@ pub fn worker_entrypoint( ); // Get the artifact bytes. 
- let compiled_artifact_blob = match std::fs::read(&artifact_path) { - Ok(bytes) => bytes, - Err(err) => { - let response = WorkerResponse::InternalError( - InternalValidationError::CouldNotOpenFile(err.to_string()), - ); - send_response(&mut stream, response)?; - continue - }, - }; - - let (pipe_read_fd, pipe_write_fd) = pipe2_cloexec()?; - - let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { - Ok(usage) => usage, - Err(errno) => { - let response = internal_error_from_errno("getrusage before", errno); - send_response(&mut stream, response)?; - continue - }, - }; + let compiled_artifact_blob = std::fs::read(&artifact_path).map_err(|e| { + map_and_send_err!( + e, + InternalValidationError::CouldNotOpenFile, + &mut stream, + worker_info + ) + })?; + + let (pipe_read_fd, pipe_write_fd) = pipe2_cloexec().map_err(|e| { + map_and_send_err!( + e, + InternalValidationError::CouldNotCreatePipe, + &mut stream, + worker_info + ) + })?; + + let usage_before = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) + .map_err(|errno| { + let e = stringify_errno("getrusage before", errno); + map_and_send_err!( + e, + InternalValidationError::Kernel, + &mut stream, + worker_info + ) + })?; let stream_fd = stream.as_raw_fd(); let compiled_artifact_blob = Arc::new(compiled_artifact_blob); @@ -222,7 +252,7 @@ pub fn worker_entrypoint( "worker: sending result to host: {:?}", result ); - send_response(&mut stream, result)?; + send_result(&mut stream, result, worker_info)?; } }, ); @@ -270,7 +300,7 @@ fn handle_clone( worker_info: &WorkerInfo, have_unshare_newuser: bool, usage_before: Usage, -) -> io::Result { +) -> io::Result> { use polkadot_node_core_pvf_common::worker::security; // SAFETY: new process is spawned within a single threaded process. This invariant @@ -301,7 +331,8 @@ fn handle_clone( usage_before, execution_timeout, ), - Err(security::clone::Error::Clone(errno)) => Ok(internal_error_from_errno("clone", errno)), + Err(security::clone::Error::Clone(errno)) => + Ok(Err(internal_error_from_errno("clone", errno))), } } @@ -316,7 +347,7 @@ fn handle_fork( execute_worker_stack_size: usize, worker_info: &WorkerInfo, usage_before: Usage, -) -> io::Result { +) -> io::Result> { // SAFETY: new process is spawned within a single threaded process. This invariant // is enforced by tests. match unsafe { nix::unistd::fork() } { @@ -338,7 +369,7 @@ fn handle_fork( usage_before, execution_timeout, ), - Err(errno) => Ok(internal_error_from_errno("fork", errno)), + Err(errno) => Ok(Err(internal_error_from_errno("fork", errno))), } } @@ -483,11 +514,11 @@ fn handle_parent_process( job_pid: Pid, usage_before: Usage, timeout: Duration, -) -> io::Result { +) -> io::Result> { // the read end will wait until all write ends have been closed, // this drop is necessary to avoid deadlock if let Err(errno) = nix::unistd::close(pipe_write_fd) { - return Ok(internal_error_from_errno("closing pipe write fd", errno)); + return Ok(Err(internal_error_from_errno("closing pipe write fd", errno))); }; // SAFETY: pipe_read_fd is an open and owned file descriptor at this point. 
@@ -512,7 +543,7 @@ fn handle_parent_process( let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { Ok(usage) => usage, - Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + Err(errno) => return Ok(Err(internal_error_from_errno("getrusage after", errno))), }; // Using `getrusage` is needed to check whether child has timedout since we cannot rely on @@ -530,32 +561,25 @@ fn handle_parent_process( cpu_tv.as_millis(), timeout.as_millis(), ); - return Ok(WorkerResponse::JobTimedOut) + return Ok(Err(WorkerError::JobTimedOut)) } match status { Ok(WaitStatus::Exited(_, exit_status)) => { let mut reader = io::BufReader::new(received_data.as_slice()); - let result = match recv_child_response(&mut reader) { - Ok(result) => result, - Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), - }; + let result = recv_child_response(&mut reader, "execute")?; match result { - Ok(JobResponse::Ok { result_descriptor }) => { + Ok(job_response) => { // The exit status should have been zero if no error occurred. if exit_status != 0 { - return Ok(WorkerResponse::JobError(format!( - "unexpected exit status: {}", - exit_status - ))) + return Ok(Err(WorkerError::JobError(JobError::UnexpectedExitStatus( + exit_status, + )))); } - Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + Ok(Ok(WorkerResponse { job_response, duration: cpu_tv })) }, - Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), - Ok(JobResponse::RuntimeConstruction(err)) => - Ok(WorkerResponse::RuntimeConstruction(err)), Err(job_error) => { gum::warn!( target: LOG_TARGET, @@ -565,9 +589,9 @@ fn handle_parent_process( job_error, ); if matches!(job_error, JobError::TimedOut) { - Ok(WorkerResponse::JobTimedOut) + Ok(Err(WorkerError::JobTimedOut)) } else { - Ok(WorkerResponse::JobError(job_error.to_string())) + Ok(Err(WorkerError::JobError(job_error.into()))) } }, } @@ -576,50 +600,21 @@ fn handle_parent_process( // // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some // other reason, so we still need to check for seccomp violations elsewhere. - Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(WorkerResponse::JobDied { + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(Err(WorkerError::JobDied { err: format!("received signal: {signal:?}"), job_pid: job_pid.as_raw(), - }), - Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + })), + Err(errno) => Ok(Err(internal_error_from_errno("waitpid", errno))), // It is within an attacker's power to send an unexpected exit status. So we cannot treat // this as an internal error (which would make us abstain), but must vote against. - Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied { + Ok(unexpected_wait_status) => Ok(Err(WorkerError::JobDied { err: format!("unexpected status from wait: {unexpected_wait_status:?}"), job_pid: job_pid.as_raw(), - }), + })), } } -/// Calculate the total CPU time from the given `usage` structure, returned from -/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user -/// and system time. -/// -/// # Arguments -/// -/// - `rusage`: Contains resource usage information. -/// -/// # Returns -/// -/// Returns a `Duration` representing the total CPU time. 
-fn get_total_cpu_usage(rusage: Usage) -> Duration { - let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + - (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; - - return Duration::from_micros(micros) -} - -/// Get a job response. -fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { - let response_bytes = framed_recv_blocking(received_data)?; - JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("execute pvf recv_child_response: decode error: {}", e), - ) - }) -} - /// Write a job response to the pipe and exit process after. /// /// # Arguments @@ -638,15 +633,10 @@ fn send_child_response(pipe_write: &mut PipeFd, response: JobResult) -> ! { } } -fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse { - WorkerResponse::InternalError(InternalValidationError::Kernel(format!( - "{}: {}: {}", - context, - errno, - io::Error::last_os_error() - ))) +fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerError { + WorkerError::InternalError(InternalValidationError::Kernel(stringify_errno(context, errno))) } fn job_error_from_errno(context: &'static str, errno: Errno) -> JobResult { - Err(JobError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error()))) + Err(JobError::Kernel(stringify_errno(context, errno))) } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 82a56107ef53..d1b218f48ae8 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -26,7 +26,6 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; -use libc; use nix::{ errno::Errno, sys::{ @@ -48,7 +47,8 @@ use polkadot_node_core_pvf_common::{ prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess}, pvf::PvfPrepData, worker::{ - cpu_time_monitor_loop, run_worker, stringify_panic_payload, + cpu_time_monitor_loop, get_total_cpu_usage, recv_child_response, run_worker, send_result, + stringify_errno, stringify_panic_payload, thread::{self, spawn_worker_thread, WaitOutcome}, WorkerKind, }, @@ -117,11 +117,6 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { Ok(pvf) } -/// Send a worker response. -fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> { - framed_send_blocking(stream, &result.encode()) -} - fn start_memory_tracking(fd: RawFd, limit: Option) { unsafe { // SAFETY: Inside the failure handler, the allocator is locked and no allocations or @@ -178,8 +173,6 @@ fn end_memory_tracking() -> isize { /// /// - `worker_version`: see above /// -/// - `security_status`: contains the detected status of security features. 
-/// /// # Flow /// /// This runs the following in a loop: @@ -233,8 +226,9 @@ pub fn worker_entrypoint( let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { Ok(usage) => usage, Err(errno) => { - let result = Err(error_from_errno("getrusage before", errno)); - send_response(&mut stream, result)?; + let result: PrepareWorkerResult = + Err(error_from_errno("getrusage before", errno)); + send_result(&mut stream, result, worker_info)?; continue }, }; @@ -294,7 +288,7 @@ pub fn worker_entrypoint( "worker: sending result to host: {:?}", result ); - send_response(&mut stream, result)?; + send_result(&mut stream, result, worker_info)?; } }, ); @@ -666,7 +660,7 @@ fn handle_parent_process( match status { Ok(WaitStatus::Exited(_pid, exit_status)) => { let mut reader = io::BufReader::new(received_data.as_slice()); - let result = recv_child_response(&mut reader) + let result = recv_child_response(&mut reader, "prepare") .map_err(|err| PrepareError::JobError(err.to_string()))?; match result { @@ -726,35 +720,6 @@ fn handle_parent_process( } } -/// Calculate the total CPU time from the given `usage` structure, returned from -/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user -/// and system time. -/// -/// # Arguments -/// -/// - `rusage`: Contains resource usage information. -/// -/// # Returns -/// -/// Returns a `Duration` representing the total CPU time. -fn get_total_cpu_usage(rusage: Usage) -> Duration { - let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + - (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; - - return Duration::from_micros(micros) -} - -/// Get a job response. -fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { - let response_bytes = framed_recv_blocking(received_data)?; - JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { - io::Error::new( - io::ErrorKind::Other, - format!("prepare pvf recv_child_response: decode error: {:?}", e), - ) - }) -} - /// Write a job response to the pipe and exit process after. /// /// # Arguments @@ -774,7 +739,7 @@ fn send_child_response(pipe_write: &mut PipeFd, response: JobResult) -> ! { } fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { - PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())) + PrepareError::Kernel(stringify_errno(context, errno)) } type JobResult = Result; diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index bdc3c7327b06..af147a2ba227 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -16,7 +16,7 @@ //! A queue that handles requests for PVF execution. 
-use super::worker_interface::Outcome;
+use super::worker_interface::{Error as WorkerInterfaceError, Response as WorkerInterfaceResponse};
 use crate::{
 	artifacts::{ArtifactId, ArtifactPathId},
 	host::ResultSender,
@@ -30,7 +30,10 @@ use futures::{
 	stream::{FuturesUnordered, StreamExt as _},
 	Future, FutureExt,
 };
-use polkadot_node_core_pvf_common::SecurityStatus;
+use polkadot_node_core_pvf_common::{
+	execute::{JobResponse, WorkerError, WorkerResponse},
+	SecurityStatus,
+};
 use polkadot_primitives::{ExecutorParams, ExecutorParamsHash};
 use slotmap::HopSlotMap;
 use std::{
@@ -133,7 +136,12 @@ impl Workers {
 enum QueueEvent {
 	Spawn(IdleWorker, WorkerHandle, ExecuteJob),
-	StartWork(Worker, Outcome, ArtifactId, ResultSender),
+	StartWork(
+		Worker,
+		Result<WorkerInterfaceResponse, WorkerInterfaceError>,
+		ArtifactId,
+		ResultSender,
+	),
 }

 type Mux = FuturesUnordered<BoxFuture<'static, QueueEvent>>;
@@ -340,23 +348,34 @@ fn handle_worker_spawned(
 async fn handle_job_finish(
 	queue: &mut Queue,
 	worker: Worker,
-	outcome: Outcome,
+	worker_result: Result<WorkerInterfaceResponse, WorkerInterfaceError>,
 	artifact_id: ArtifactId,
 	result_tx: ResultSender,
 ) {
-	let (idle_worker, result, duration, sync_channel) = match outcome {
-		Outcome::Ok { result_descriptor, duration, idle_worker } => {
+	let (idle_worker, result, duration, sync_channel) = match worker_result {
+		Ok(WorkerInterfaceResponse {
+			worker_response:
+				WorkerResponse { job_response: JobResponse::Ok { result_descriptor }, duration },
+			idle_worker,
+		}) => {
 			// TODO: propagate the soft timeout
 			(Some(idle_worker), Ok(result_descriptor), Some(duration), None)
 		},
-		Outcome::InvalidCandidate { err, idle_worker } => (
+		Ok(WorkerInterfaceResponse {
+			worker_response: WorkerResponse { job_response: JobResponse::InvalidCandidate(err), .. },
+			idle_worker,
+		}) => (
 			Some(idle_worker),
 			Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))),
 			None,
 			None,
 		),
-		Outcome::RuntimeConstruction { err, idle_worker } => {
+		Ok(WorkerInterfaceResponse {
+			worker_response:
+				WorkerResponse { job_response: JobResponse::RuntimeConstruction(err), .. },
+			idle_worker,
+		}) => {
 			// The task for artifact removal is executed concurrently with
 			// the message to the host on the execution result.
 			let (result_tx, result_rx) = oneshot::channel();
@@ -376,27 +395,31 @@ async fn handle_job_finish(
 				Some(result_rx),
 			)
 		},
-		Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None, None),
+
+		Err(WorkerInterfaceError::InternalError(err)) |
+		Err(WorkerInterfaceError::WorkerError(WorkerError::InternalError(err))) =>
+			(None, Err(ValidationError::Internal(err)), None, None),
 		// Either the worker or the job timed out. Kill the worker in either case. Treated as
 		// definitely-invalid, because if we timed out, there's no time left for a retry.
-		Outcome::HardTimeout =>
+		Err(WorkerInterfaceError::HardTimeout) |
+		Err(WorkerInterfaceError::WorkerError(WorkerError::JobTimedOut)) =>
 			(None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None),
 		// "Maybe invalid" errors (will retry).
-		Outcome::WorkerIntfErr => (
+		Err(WorkerInterfaceError::CommunicationErr(_err)) => (
 			None,
 			Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)),
 			None,
 			None,
 		),
-		Outcome::JobDied { err } => (
+		Err(WorkerInterfaceError::WorkerError(WorkerError::JobDied { err, .. })) => (
 			None,
 			Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))),
 			None,
 			None,
 		),
-		Outcome::JobError { err } => (
+		Err(WorkerInterfaceError::WorkerError(WorkerError::JobError(err))) => (
 			None,
-			Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))),
+			Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err.to_string()))),
 			None,
 			None,
 		),
@@ -543,14 +566,14 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) {
 	queue.mux.push(
 		async move {
 			let _timer = execution_timer;
-			let outcome = super::worker_interface::start_work(
+			let result = super::worker_interface::start_work(
 				idle,
 				job.artifact.clone(),
 				job.exec_timeout,
 				job.params,
 			)
 			.await;
-			QueueEvent::StartWork(worker, outcome, job.artifact.id, job.result_tx)
+			QueueEvent::StartWork(worker, result, job.artifact.id, job.result_tx)
 		}
 		.boxed(),
 	);
diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs
index db81da118d7b..9dcadfb4c2a7 100644
--- a/polkadot/node/core/pvf/src/execute/worker_interface.rs
+++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs
@@ -29,10 +29,9 @@ use futures_timer::Delay;
 use parity_scale_codec::{Decode, Encode};
 use polkadot_node_core_pvf_common::{
 	error::InternalValidationError,
-	execute::{Handshake, WorkerResponse},
+	execute::{Handshake, WorkerError, WorkerResponse},
 	worker_dir, SecurityStatus,
 };
-use polkadot_parachain_primitives::primitives::ValidationResult;
 use polkadot_primitives::ExecutorParams;
 use std::{path::Path, time::Duration};
 use tokio::{io, net::UnixStream};
@@ -69,7 +68,8 @@ pub async fn spawn(
 			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %idle_worker.pid,
-				%err
+				"failed to send a handshake to the spawned worker: {}",
+				error
 			);
 			err
 		})?;
@@ -78,39 +78,40 @@ pub async fn spawn(

 /// Outcome of PVF execution.
 ///
-/// If the idle worker token is not returned, it means the worker must be terminated.
-pub enum Outcome {
-	/// PVF execution completed successfully and the result is returned. The worker is ready for
-	/// another job.
-	Ok { result_descriptor: ValidationResult, duration: Duration, idle_worker: IdleWorker },
-	/// The candidate validation failed. It may be for example because the wasm execution triggered
-	/// a trap. Errors related to the preparation process are not expected to be encountered by the
-	/// execution workers.
-	InvalidCandidate { err: String, idle_worker: IdleWorker },
-	/// The error is probably transient. It may be for example
-	/// because the artifact was prepared with a Wasmtime version different from the version
-	/// in the current execution environment.
-	RuntimeConstruction { err: String, idle_worker: IdleWorker },
+/// PVF execution completed and the result is returned. The worker is ready for
+/// another job.
+pub struct Response {
+	/// The response (valid/invalid) from the worker.
+	pub worker_response: WorkerResponse,
+	/// Returning the idle worker token means the worker can be reused.
+	pub idle_worker: IdleWorker,
+}
+/// The idle worker token is not returned for any of these cases, meaning the worker must be
+/// terminated.
+///
+/// NOTE: Errors related to the preparation process are not expected to be encountered by the
+/// execution workers.
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
 	/// The execution time exceeded the hard limit. The worker is terminated.
+	#[error("The communication with the worker exceeded the hard limit")]
 	HardTimeout,
 	/// An I/O error happened during communication with the worker. This may mean that the worker
 	/// process already died. The token is not returned in any case.
-	WorkerIntfErr,
-	/// The job process has died. We must kill the worker just in case.
-	///
-	/// We cannot treat this as an internal error because malicious code may have caused this.
-	JobDied { err: String },
-	/// An unexpected error occurred in the job process.
-	///
-	/// Because malicious code can cause a job error, we must not treat it as an internal error.
-	JobError { err: String },
+	#[error("An I/O error happened during communication with the worker: {0}")]
+	CommunicationErr(#[from] io::Error),
+	/// The worker reported an error (can be from itself or from the job). The worker should not be
+	/// reused.
+	#[error("The worker reported an error: {0}")]
+	WorkerError(#[from] WorkerError),
 	/// An internal error happened during the validation. Such an error is most likely related to
 	/// some transient glitch.
 	///
 	/// Should only ever be used for errors independent of the candidate and PVF. Therefore it may
 	/// be a problem with the worker, so we terminate it.
-	InternalError { err: InternalValidationError },
+	#[error("An internal error occurred: {0}")]
+	InternalError(#[from] InternalValidationError),
 }

 /// Given the idle token of a worker and parameters of work, communicates with the worker and
@@ -123,7 +124,7 @@ pub async fn start_work(
 	artifact: ArtifactPathId,
 	execution_timeout: Duration,
 	validation_params: Vec<u8>,
-) -> Outcome {
+) -> Result<Response, Error> {
 	let IdleWorker { mut stream, pid, worker_dir } = worker;

 	gum::debug!(
@@ -136,16 +137,18 @@ pub async fn start_work(
 	);

 	with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move {
-		if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await {
-			gum::warn!(
-				target: LOG_TARGET,
-				worker_pid = %pid,
-				validation_code_hash = ?artifact.id.code_hash,
-				?error,
-				"failed to send an execute request",
-			);
-			return Outcome::WorkerIntfErr
-		}
+		send_request(&mut stream, &validation_params, execution_timeout).await.map_err(
+			|error| {
+				gum::warn!(
+					target: LOG_TARGET,
+					worker_pid = %pid,
+					validation_code_hash = ?artifact.id.code_hash,
+					"failed to send an execute request: {}",
+					error,
+				);
+				Error::InternalError(InternalValidationError::HostCommunication(error.to_string()))
+			},
+		)?;

 		// We use a generous timeout here. This is in addition to the one in the child process, in
 		// case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout
@@ -153,12 +156,12 @@ pub async fn start_work(
 		// load, but the CPU resources of the child can only be measured from the parent after the
 		// child process terminates.
 		let timeout = execution_timeout * JOB_TIMEOUT_WALL_CLOCK_FACTOR;
-		let response = futures::select! {
-			response = recv_response(&mut stream).fuse() => {
-				match response {
-					Ok(response) =>
-						handle_response(
-							response,
+		let worker_result = futures::select! {
+			worker_result = recv_result(&mut stream).fuse() => {
+				match worker_result {
+					Ok(result) =>
+						handle_result(
+							result,
 							pid,
 							execution_timeout,
 						)
@@ -168,11 +171,11 @@ pub async fn start_work(
 						target: LOG_TARGET,
 						worker_pid = %pid,
 						validation_code_hash = ?artifact.id.code_hash,
-						?error,
-						"failed to recv an execute response",
+						"failed to recv an execute result: {}",
+						error,
 					);

-					return Outcome::WorkerIntfErr
+					return Err(Error::CommunicationErr(error))
 				},
 			}
 		},
 		_ = Delay::new(timeout).fuse() => {
 			gum::warn!(
 				target: LOG_TARGET,
 				worker_pid = %pid,
 				validation_code_hash = ?artifact.id.code_hash,
 				"execution worker exceeded lenient timeout for execution, child worker likely stalled",
 			);
-			WorkerResponse::JobTimedOut
+			return Err(Error::HardTimeout)
 		},
 	};

-	match response {
-		WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok {
-			result_descriptor,
-			duration,
-			idle_worker: IdleWorker { stream, pid, worker_dir },
-		},
-		WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate {
-			err,
-			idle_worker: IdleWorker { stream, pid, worker_dir },
-		},
-		WorkerResponse::RuntimeConstruction(err) => Outcome::RuntimeConstruction {
-			err,
+	match worker_result {
+		Ok(worker_response) => Ok(Response {
+			worker_response,
 			idle_worker: IdleWorker { stream, pid, worker_dir },
-		},
-		WorkerResponse::JobTimedOut => Outcome::HardTimeout,
-		WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err },
-		WorkerResponse::JobError(err) => Outcome::JobError { err },
-
-		WorkerResponse::InternalError(err) => Outcome::InternalError { err },
+		}),
+		Err(worker_error) => Err(worker_error.into()),
 	}
 	})
 	.await
@@ -215,12 +205,12 @@
 ///
 /// Here we know the artifact exists, but is still located in a temporary file which will be cleared
 /// by [`with_worker_dir_setup`].
-async fn handle_response(
-	response: WorkerResponse,
+async fn handle_result(
+	worker_result: Result<WorkerResponse, WorkerError>,
 	worker_pid: u32,
 	execution_timeout: Duration,
-) -> WorkerResponse {
-	if let WorkerResponse::Ok { duration, .. } = response {
+) -> Result<WorkerResponse, WorkerError> {
+	if let Ok(WorkerResponse { duration, .. }) = worker_result {
 		if duration > execution_timeout {
 			// The job didn't complete within the timeout.
 			gum::warn!(
@@ -232,11 +222,11 @@
 			);

 			// Return a timeout error.
-			return WorkerResponse::JobTimedOut
+			return Err(WorkerError::JobTimedOut)
 		}
 	}

-	response
+	worker_result
 }

 /// Create a temporary file for an artifact in the worker cache, execute the given future/closure
@@ -249,9 +239,9 @@ async fn with_worker_dir_setup(
 	pid: u32,
 	artifact_path: &Path,
 	f: F,
-) -> Outcome
+) -> Result<Response, Error>
 where
-	Fut: futures::Future<Output = Outcome>,
+	Fut: futures::Future<Output = Result<Response, Error>>,
 	F: FnOnce(WorkerDir) -> Fut,
 {
 	// Cheaply create a hard link to the artifact. The artifact is always at a known location in the
@@ -263,16 +253,14 @@ where
 			target: LOG_TARGET,
 			worker_pid = %pid,
 			?worker_dir,
-			"failed to clear worker cache after the job: {:?}",
+			"failed to clear worker cache after the job: {}",
 			err,
 		);
-		return Outcome::InternalError {
-			err: InternalValidationError::CouldNotCreateLink(format!("{:?}", err)),
-		}
+		return Err(InternalValidationError::CouldNotCreateLink(format!("{:?}", err)).into());
 	}

 	let worker_dir_path = worker_dir.path().to_owned();
-	let outcome = f(worker_dir).await;
+	let result = f(worker_dir).await;

 	// Try to clear the worker dir.
 	if let Err(err) = clear_worker_dir_path(&worker_dir_path) {
@@ -283,15 +271,14 @@ where
 			"failed to clear worker cache after the job: {:?}",
 			err,
 		);
-		return Outcome::InternalError {
-			err: InternalValidationError::CouldNotClearWorkerDir {
-				err: format!("{:?}", err),
-				path: worker_dir_path.to_str().map(String::from),
-			},
+		return Err(InternalValidationError::CouldNotClearWorkerDir {
+			err: format!("{:?}", err),
+			path: worker_dir_path.to_str().map(String::from),
 		}
+		.into())
 	}

-	outcome
+	result
 }

 /// Sends a handshake with information specific to the execute worker.
@@ -308,12 +295,12 @@ async fn send_request(
 	framed_send(stream, &execution_timeout.encode()).await
 }

-async fn recv_response(stream: &mut UnixStream) -> io::Result<WorkerResponse> {
-	let response_bytes = framed_recv(stream).await?;
-	WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| {
+async fn recv_result(stream: &mut UnixStream) -> io::Result<Result<WorkerResponse, WorkerError>> {
+	let result_bytes = framed_recv(stream).await?;
+	Result::<WorkerResponse, WorkerError>::decode(&mut result_bytes.as_slice()).map_err(|e| {
 		io::Error::new(
 			io::ErrorKind::Other,
-			format!("execute pvf recv_response: decode error: {:?}", e),
+			format!("execute pvf recv_result: decode error: {:?}", e),
 		)
 	})
 }
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index 247d753d7c44..2d180fc59295 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -959,10 +959,7 @@ pub(crate) mod tests {
 	use crate::{artifacts::generate_artifact_path, PossiblyInvalidError};
 	use assert_matches::assert_matches;
 	use futures::future::BoxFuture;
-	use polkadot_node_core_pvf_common::{
-		error::PrepareError,
-		prepare::{PrepareStats, PrepareSuccess},
-	};
+	use polkadot_node_core_pvf_common::prepare::PrepareStats;

 	const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3);
 	pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30);

From e504c41a5adbd5e6d9a7764c07f6dcf47b2dae77 Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Sat, 20 Apr 2024 02:05:34 +0200
Subject: [PATCH 25/74] Allow privileged virtual bond in Staking pallet (#3889)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is the first PR in preparation for
https://github.com/paritytech/polkadot-sdk/issues/454.

## Follow ups:

- https://github.com/paritytech/polkadot-sdk/pull/3904.
- https://github.com/paritytech/polkadot-sdk/pull/3905.

Overall changes are documented here (lot more visual 😍):
https://hackmd.io/@ak0n/454-np-governance

[Maybe followup](https://github.com/paritytech/polkadot-sdk/issues/4217) with migration of the
storage item `VirtualStakers` as a bool or enum in `Ledger`.

## Context

We want to provide a way for a user (`Delegator`) to delegate their funds to another account
(`Agent`). Delegation implies the funds are locked in the delegator's account itself. The agent
can then act on behalf of the delegator to stake directly on the Staking pallet.

The delegation feature is added to Staking via another pallet, `delegated-staking`, worked on
[here](https://github.com/paritytech/polkadot-sdk/pull/3904).

## Introduces:

### StakingUnchecked Trait

As the name implies, this trait allows unchecked (non-locked) mutation of the staking ledger.
These APIs are only meant to be used by other pallets in the runtime and should not be exposed
directly to the user code path; a minimal usage sketch follows.

Also related: https://github.com/paritytech/polkadot-sdk/issues/3888.
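For orientation, here is a minimal sketch of how a managing pallet might drive this trait. This is
illustrative only: `Staking`, `AccountId`, `Balance` and `hold_delegated_funds` are hypothetical
placeholder names, not part of this PR; only the `virtual_bond` call mirrors the API introduced
below.

```rust
use sp_staking::{StakingInterface, StakingUnchecked};

// Hedged sketch, not part of this PR. The managing pallet locks/holds the
// delegated funds itself; `virtual_bond` only book-keeps the stake in
// pallet-staking without applying any lock on `agent`.
fn delegate(agent: &AccountId, payee: &AccountId, value: Balance) -> sp_runtime::DispatchResult {
	// Hypothetical helper owned by the caller: it must lock/hold `value`
	// before the virtual bond is recorded.
	hold_delegated_funds(agent, value)?;
	<Staking as StakingUnchecked>::virtual_bond(agent, value, payee)
}
```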
### Virtual Bond

Allows other pallets to stake via the staking pallet while managing the locks on these accounts
themselves. Introduces a new storage item, `VirtualStakers`, that whitelists these accounts.

We also restrict virtual stakers from setting the reward account to themselves: since the account
has no locks, we cannot support compounding of rewards, so, conservatively, we require them to set
a reward account different from the staker. Since these accounts are managed by code, it should be
easy for the managing pallet to redistribute rewards and rebond them.

### Slashes

Since there is no actual lock maintained by the staking pallet for virtual stakers, this pallet
does not apply any slashes to them. It is therefore important for pallets managing virtual stakers
to listen to slashing events and apply the necessary slashes themselves (a sketch of such a
listener follows the `slashing.rs` changes below).

---
 prdoc/pr_3889.prdoc                          |  14 ++
 substrate/frame/nomination-pools/src/mock.rs |   8 +
 substrate/frame/staking/src/ledger.rs        |  43 ++--
 substrate/frame/staking/src/mock.rs          |  23 +-
 substrate/frame/staking/src/pallet/impls.rs  | 155 +++++++++++-
 substrate/frame/staking/src/pallet/mod.rs    |  39 ++-
 substrate/frame/staking/src/slashing.rs      |  22 +-
 substrate/frame/staking/src/tests.rs         | 243 ++++++++++++++++++-
 substrate/primitives/staking/src/lib.rs      |  38 ++-
 9 files changed, 513 insertions(+), 72 deletions(-)
 create mode 100644 prdoc/pr_3889.prdoc

diff --git a/prdoc/pr_3889.prdoc b/prdoc/pr_3889.prdoc
new file mode 100644
index 000000000000..b32ffcc214c0
--- /dev/null
+++ b/prdoc/pr_3889.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Allow privileged virtual bond into pallet Staking
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Introduces a new low-level API to allow privileged virtual bonding into pallet Staking.
+      This allows other pallets to stake funds into the staking pallet while managing the fund
+      lock and unlocking process themselves.
+ +crates: + - name: pallet-staking + diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index b9301a400953..686402b84349 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -131,6 +131,10 @@ impl sp_staking::StakingInterface for StakingMock { Ok(()) } + fn update_payee(_stash: &Self::AccountId, _reward_acc: &Self::AccountId) -> DispatchResult { + unimplemented!("method currently not used in testing") + } + fn chill(_: &Self::AccountId) -> sp_runtime::DispatchResult { Ok(()) } @@ -223,6 +227,10 @@ impl sp_staking::StakingInterface for StakingMock { fn max_exposure_page_size() -> sp_staking::Page { unimplemented!("method currently not used in testing") } + + fn slash_reward_fraction() -> Perbill { + unimplemented!("method currently not used in testing") + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/substrate/frame/staking/src/ledger.rs b/substrate/frame/staking/src/ledger.rs index 9461daefed65..67a86b86226c 100644 --- a/substrate/frame/staking/src/ledger.rs +++ b/substrate/frame/staking/src/ledger.rs @@ -33,13 +33,14 @@ use frame_support::{ defensive, ensure, - traits::{Defensive, LockableCurrency, WithdrawReasons}, + traits::{Defensive, LockableCurrency}, }; use sp_staking::StakingAccount; use sp_std::prelude::*; use crate::{ - BalanceOf, Bonded, Config, Error, Ledger, Payee, RewardDestination, StakingLedger, STAKING_ID, + BalanceOf, Bonded, Config, Error, Ledger, Pallet, Payee, RewardDestination, StakingLedger, + VirtualStakers, STAKING_ID, }; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -187,7 +188,17 @@ impl StakingLedger { return Err(Error::::NotStash) } - T::Currency::set_lock(STAKING_ID, &self.stash, self.total, WithdrawReasons::all()); + // We skip locking virtual stakers. + if !Pallet::::is_virtual_staker(&self.stash) { + // for direct stakers, update lock on stash based on ledger. + T::Currency::set_lock( + STAKING_ID, + &self.stash, + self.total, + frame_support::traits::WithdrawReasons::all(), + ); + } + Ledger::::insert( &self.controller().ok_or_else(|| { defensive!("update called on a ledger that is not bonded."); @@ -204,22 +215,22 @@ impl StakingLedger { /// It sets the reward preferences for the bonded stash. pub(crate) fn bond(self, payee: RewardDestination) -> Result<(), Error> { if >::contains_key(&self.stash) { - Err(Error::::AlreadyBonded) - } else { - >::insert(&self.stash, payee); - >::insert(&self.stash, &self.stash); - self.update() + return Err(Error::::AlreadyBonded) } + + >::insert(&self.stash, payee); + >::insert(&self.stash, &self.stash); + self.update() } /// Sets the ledger Payee. pub(crate) fn set_payee(self, payee: RewardDestination) -> Result<(), Error> { if !>::contains_key(&self.stash) { - Err(Error::::NotStash) - } else { - >::insert(&self.stash, payee); - Ok(()) + return Err(Error::::NotStash) } + + >::insert(&self.stash, payee); + Ok(()) } /// Sets the ledger controller to its stash. @@ -252,12 +263,16 @@ impl StakingLedger { let controller = >::get(stash).ok_or(Error::::NotStash)?; >::get(&controller).ok_or(Error::::NotController).map(|ledger| { - T::Currency::remove_lock(STAKING_ID, &ledger.stash); Ledger::::remove(controller); - >::remove(&stash); >::remove(&stash); + // kill virtual staker if it exists. + if >::take(&stash).is_none() { + // if not virtual staker, clear locks. + T::Currency::remove_lock(STAKING_ID, &ledger.stash); + } + Ok(()) })? 
} diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 6db462c1a70f..b46b863c016e 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -249,17 +249,21 @@ parameter_types! { pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub static SlashObserver: BTreeMap> = BTreeMap::new(); } pub struct EventListenerMock; impl OnStakingUpdate for EventListenerMock { fn on_slash( - _pool_account: &AccountId, + pool_account: &AccountId, slashed_bonded: Balance, slashed_chunks: &BTreeMap, - _total_slashed: Balance, + total_slashed: Balance, ) { LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + SlashObserver::mutate(|map| { + map.insert(*pool_account, map.get(pool_account).unwrap_or(&0) + total_slashed) + }); } } @@ -598,6 +602,21 @@ pub(crate) fn bond_nominator(who: AccountId, val: Balance, target: Vec, +) { + // In a real scenario, `who` is a keyless account managed by another pallet which provides for + // it. + System::inc_providers(&who); + + // Bond who virtually. + assert_ok!(::virtual_bond(&who, val, &payee)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(who), target)); +} + /// Progress to the given block, triggering session and era changes as we progress. /// /// This will finalize the previous block, initialize up to the given block, essentially simulating diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 2f43e4847e45..0c0ef0dbf463 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -28,15 +28,18 @@ use frame_support::{ pallet_prelude::*, traits::{ Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, - InspectLockableCurrency, Len, OnUnbalanced, TryCollect, UnixTime, + InspectLockableCurrency, Len, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; use sp_runtime::{ - traits::{Bounded, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero}, - Perbill, Percent, + traits::{ + Bounded, CheckedAdd, CheckedSub, Convert, One, SaturatedConversion, Saturating, + StaticLookup, Zero, + }, + ArithmeticError, Perbill, Percent, }; use sp_staking::{ currency_to_vote::CurrencyToVote, @@ -149,6 +152,39 @@ impl Pallet { Self::slashable_balance_of_vote_weight(who, issuance) } + pub(super) fn do_bond_extra(stash: &T::AccountId, additional: BalanceOf) -> DispatchResult { + let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; + + // for virtual stakers, we don't need to check the balance. Since they are only accessed + // via low level apis, we can assume that the caller has done the due diligence. + let extra = if Self::is_virtual_staker(stash) { + additional + } else { + // additional amount or actual balance of stash whichever is lower. + additional.min( + T::Currency::free_balance(stash) + .checked_sub(&ledger.total) + .ok_or(ArithmeticError::Overflow)?, + ) + }; + + ledger.total = ledger.total.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + ledger.active = ledger.active.checked_add(&extra).ok_or(ArithmeticError::Overflow)?; + // last check: the new active amount of ledger must be more than ED. + ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); + + // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
+ ledger.update()?; + // update this staker in the sorted list, if they exist in it. + if T::VoterList::contains(stash) { + let _ = T::VoterList::on_update(&stash, Self::weight_of(stash)).defensive(); + } + + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: extra }); + + Ok(()) + } + pub(super) fn do_withdraw_unbonded( controller: &T::AccountId, num_slashing_spans: u32, @@ -1132,6 +1168,11 @@ impl Pallet { ) -> Exposure> { EraInfo::::get_full_exposure(era, account) } + + /// Whether `who` is a virtual staker whose funds are managed by another pallet. + pub(crate) fn is_virtual_staker(who: &T::AccountId) -> bool { + VirtualStakers::::contains_key(who) + } } impl Pallet { @@ -1748,6 +1789,23 @@ impl StakingInterface for Pallet { .map(|_| ()) } + fn update_payee(stash: &Self::AccountId, reward_acc: &Self::AccountId) -> DispatchResult { + // Since virtual stakers are not allowed to compound their rewards as this pallet does not + // manage their locks, we do not allow reward account to be set same as stash. For + // external pallets that manage the virtual bond, they can claim rewards and re-bond them. + ensure!( + !Self::is_virtual_staker(stash) || stash != reward_acc, + Error::::RewardDestinationRestricted + ); + + // since controller is deprecated and this function is never used for old ledgers with + // distinct controllers, we can safely assume that stash is the controller. + Self::set_payee( + RawOrigin::Signed(stash.clone()).into(), + RewardDestination::Account(reward_acc.clone()), + ) + } + fn chill(who: &Self::AccountId) -> DispatchResult { // defensive-only: any account bonded via this interface has the stash set as the // controller, but we have to be sure. Same comment anywhere else that we read this. @@ -1832,6 +1890,10 @@ impl StakingInterface for Pallet { } } + fn slash_reward_fraction() -> Perbill { + SlashRewardFraction::::get() + } + sp_staking::runtime_benchmarks_enabled! { fn nominations(who: &Self::AccountId) -> Option> { Nominators::::get(who).map(|n| n.targets.into_inner()) @@ -1860,6 +1922,55 @@ impl StakingInterface for Pallet { } } +impl sp_staking::StakingUnchecked for Pallet { + fn migrate_to_virtual_staker(who: &Self::AccountId) { + T::Currency::remove_lock(crate::STAKING_ID, who); + VirtualStakers::::insert(who, ()); + } + + /// Virtually bonds `keyless_who` to `payee` with `value`. + /// + /// The payee must not be the same as the `keyless_who`. + fn virtual_bond( + keyless_who: &Self::AccountId, + value: Self::Balance, + payee: &Self::AccountId, + ) -> DispatchResult { + if StakingLedger::::is_bonded(StakingAccount::Stash(keyless_who.clone())) { + return Err(Error::::AlreadyBonded.into()) + } + + // check if payee not same as who. + ensure!(keyless_who != payee, Error::::RewardDestinationRestricted); + + // mark this pallet as consumer of `who`. + frame_system::Pallet::::inc_consumers(&keyless_who).map_err(|_| Error::::BadState)?; + + // mark who as a virtual staker. 
+ VirtualStakers::::insert(keyless_who, ()); + + Self::deposit_event(Event::::Bonded { stash: keyless_who.clone(), amount: value }); + let ledger = StakingLedger::::new(keyless_who.clone(), value); + + ledger.bond(RewardDestination::Account(payee.clone()))?; + + Ok(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn migrate_to_direct_staker(who: &Self::AccountId) { + assert!(VirtualStakers::::contains_key(who)); + let ledger = StakingLedger::::get(Stash(who.clone())).unwrap(); + T::Currency::set_lock( + crate::STAKING_ID, + who, + ledger.total, + frame_support::traits::WithdrawReasons::all(), + ); + VirtualStakers::::remove(who); + } +} + #[cfg(any(test, feature = "try-runtime"))] impl Pallet { pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { @@ -1980,16 +2091,44 @@ impl Pallet { /// Invariants: /// * Stake consistency: ledger.total == ledger.active + sum(ledger.unlocking). /// * The ledger's controller and stash matches the associated `Bonded` tuple. - /// * Staking locked funds for every bonded stash should be the same as its ledger's total. + /// * Staking locked funds for every bonded stash (non virtual stakers) should be the same as + /// its ledger's total. + /// * For virtual stakers, locked funds should be zero and payee should be non-stash account. /// * Staking ledger and bond are not corrupted. fn check_ledgers() -> Result<(), TryRuntimeError> { Bonded::::iter() .map(|(stash, ctrl)| { // ensure locks consistency. - ensure!( - Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), - "bond, ledger and/or staking lock inconsistent for a bonded stash." - ); + if VirtualStakers::::contains_key(stash.clone()) { + ensure!( + T::Currency::balance_locked(crate::STAKING_ID, &stash) == Zero::zero(), + "virtual stakers should not have any locked balance" + ); + ensure!( + >::get(stash.clone()).unwrap() == stash.clone(), + "stash and controller should be same" + ); + ensure!( + Ledger::::get(stash.clone()).unwrap().stash == stash, + "ledger corrupted for virtual staker" + ); + let reward_destination = >::get(stash.clone()).unwrap(); + if let RewardDestination::Account(payee) = reward_destination { + ensure!( + payee != stash.clone(), + "reward destination should not be same as stash for virtual staker" + ); + } else { + return Err(DispatchError::Other( + "reward destination must be of account variant for virtual staker", + )); + } + } else { + ensure!( + Self::inspect_bond_state(&stash) == Ok(LedgerIntegrityState::Ok), + "bond, ledger and/or staking lock inconsistent for a bonded stash." + ); + } // ensure ledger consistency. Self::ensure_ledger_consistent(ctrl) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 2e5b3aa7b873..76ddad6f1359 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -32,7 +32,7 @@ use frame_support::{ }; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ - traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, + traits::{SaturatedConversion, StaticLookup, Zero}, ArithmeticError, Perbill, Percent, }; @@ -379,6 +379,15 @@ pub mod pallet { pub type Nominators = CountedStorageMap<_, Twox64Concat, T::AccountId, Nominations>; + /// Stakers whose funds are managed by other pallets. + /// + /// This pallet does not apply any locks on them, therefore they are only virtually bonded. 
They + /// are expected to be keyless accounts and hence should not be allowed to mutate their ledger + /// directly via this pallet. Instead, these accounts are managed by other pallets and accessed + /// via low level apis. We keep track of them to do minimal integrity checks. + #[pallet::storage] + pub type VirtualStakers = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; + /// The maximum nominator count before we stop allowing new validators to join. /// /// When this value is not set, no limits are enforced. @@ -858,6 +867,10 @@ pub mod pallet { ControllerDeprecated, /// Cannot reset a ledger. CannotRestoreLedger, + /// Provided reward destination is not allowed. + RewardDestinationRestricted, + /// Not enough funds available to withdraw. + NotEnoughFunds, } #[pallet::hooks] @@ -985,29 +998,7 @@ pub mod pallet { #[pallet::compact] max_additional: BalanceOf, ) -> DispatchResult { let stash = ensure_signed(origin)?; - let mut ledger = Self::ledger(StakingAccount::Stash(stash.clone()))?; - - let stash_balance = T::Currency::free_balance(&stash); - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { - let extra = extra.min(max_additional); - ledger.total += extra; - ledger.active += extra; - // Last check: the new active amount of ledger must be more than ED. - ensure!( - ledger.active >= T::Currency::minimum_balance(), - Error::::InsufficientBond - ); - - // NOTE: ledger must be updated prior to calling `Self::weight_of`. - ledger.update()?; - // update this staker in the sorted list, if they exist in it. - if T::VoterList::contains(&stash) { - let _ = T::VoterList::on_update(&stash, Self::weight_of(&stash)).defensive(); - } - - Self::deposit_event(Event::::Bonded { stash, amount: extra }); - } - Ok(()) + Self::do_bond_extra(&stash, max_additional) } /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 709fd1441ec3..2011e9eb8301 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -609,8 +609,13 @@ pub fn do_slash( }; let value = ledger.slash(value, T::Currency::minimum_balance(), slash_era); + if value.is_zero() { + // nothing to do + return + } - if !value.is_zero() { + // Skip slashing for virtual stakers. The pallets managing them should handle the slashing. + if !Pallet::::is_virtual_staker(stash) { let (imbalance, missing) = T::Currency::slash(stash, value); slashed_imbalance.subsume(imbalance); @@ -618,17 +623,14 @@ pub fn do_slash( // deduct overslash from the reward payout *reward_payout = reward_payout.saturating_sub(missing); } + } - let _ = ledger - .update() - .defensive_proof("ledger fetched from storage so it exists in storage; qed."); + let _ = ledger + .update() + .defensive_proof("ledger fetched from storage so it exists in storage; qed."); - // trigger the event - >::deposit_event(super::Event::::Slashed { - staker: stash.clone(), - amount: value, - }); - } + // trigger the event + >::deposit_event(super::Event::::Slashed { staker: stash.clone(), amount: value }); } /// Apply a previously-unapplied slash. 
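As the `Slashes` section of the commit message notes, `do_slash` above only broadcasts the slash
for virtual stakers; the managing pallet must apply it. A minimal sketch of such a listener,
modeled on the `SlashObserver` test mock earlier in this patch (`apply_slash_to_held_funds` is a
hypothetical helper, as are the concrete `AccountId`/`Balance` types):

```rust
use sp_staking::{EraIndex, OnStakingUpdate};
use sp_std::collections::btree_map::BTreeMap;

pub struct VirtualSlashListener;
impl OnStakingUpdate<AccountId, Balance> for VirtualSlashListener {
	fn on_slash(
		stash: &AccountId,
		_slashed_active: Balance,
		_slashed_unlocking: &BTreeMap<EraIndex, Balance>,
		slashed_total: Balance,
	) {
		// pallet-staking skips the actual balance slash for virtual stakers,
		// so the funds held on behalf of `stash` must be reduced here.
		apply_slash_to_held_funds(stash, slashed_total); // hypothetical helper
	}
}
```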
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index a5c9abe2f176..87f6fd424bd7 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -27,7 +27,7 @@ use frame_support::{ assert_noop, assert_ok, assert_storage_noop, dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, pallet_prelude::*, - traits::{Currency, Get, ReservableCurrency}, + traits::{Currency, Get, InspectLockableCurrency, ReservableCurrency}, }; use mock::*; @@ -623,12 +623,8 @@ fn nominating_and_rewards_should_work() { )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 21, 31])); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(3), - 1000, - RewardDestination::Account(3) - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![11, 21, 41])); + // the second nominator is virtual. + bond_virtual_nominator(3, 333, 1000, vec![11, 21, 41]); // the total reward for era 0 let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); @@ -694,10 +690,12 @@ fn nominating_and_rewards_should_work() { ); // Nominator 3: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 21]'s reward. ==> // 2/9 + 3/11 + assert_eq!(Balances::total_balance(&3), initial_balance); + // 333 is the reward destination for 3. assert_eq_error_rate!( - Balances::total_balance(&3), - initial_balance + (2 * payout_for_11 / 9 + 3 * payout_for_21 / 11), - 2, + Balances::total_balance(&333), + 2 * payout_for_11 / 9 + 3 * payout_for_21 / 11, + 2 ); // Validator 11: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 @@ -1893,7 +1891,7 @@ fn reap_stash_works() { .balance_factor(10) .build_and_execute(|| { // given - assert_eq!(Balances::free_balance(11), 10 * 1000); + assert_eq!(Balances::balance_locked(STAKING_ID, &11), 10 * 1000); assert_eq!(Staking::bonded(&11), Some(11)); assert!(>::contains_key(&11)); @@ -1919,6 +1917,8 @@ fn reap_stash_works() { assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); assert!(!>::contains_key(&11)); + // lock is removed. + assert_eq!(Balances::balance_locked(STAKING_ID, &11), 0); }); } @@ -6849,6 +6849,226 @@ mod staking_interface { } } +mod staking_unchecked { + use sp_staking::{Stake, StakingInterface, StakingUnchecked}; + + use super::*; + + #[test] + fn virtual_bond_does_not_lock() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + assert_eq!(Balances::free_balance(10), 1); + // 10 can bond more than its balance amount since we do not require lock for virtual + // bonding. + assert_ok!(::virtual_bond(&10, 100, &15)); + // nothing is locked on 10. + assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + // adding more balance does not lock anything as well. + assert_ok!(::bond_extra(&10, 1000)); + // but ledger is updated correctly. + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 1100 }) + ); + + // lets try unbonding some amount. + assert_ok!(::unbond(&10, 200)); + assert_eq!( + Staking::ledger(10.into()).unwrap(), + StakingLedgerInspect { + stash: 10, + total: 1100, + active: 1100 - 200, + unlocking: bounded_vec![UnlockChunk { value: 200, era: 1 + 3 }], + legacy_claimed_rewards: bounded_vec![], + } + ); + + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + // still no locks. + assert_eq!(Balances::balance_locked(STAKING_ID, &10), 0); + + mock::start_active_era(2); + // cannot withdraw without waiting for unbonding period. 
+ assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 1100, active: 900 }) + ); + + // in era 4, 10 can withdraw unlocking amount. + mock::start_active_era(4); + assert_ok!(::withdraw_unbonded(10, 0)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 900 }) + ); + + // unbond all. + assert_ok!(::unbond(&10, 900)); + assert_eq!( + ::stake(&10), + Ok(Stake { total: 900, active: 0 }) + ); + mock::start_active_era(7); + assert_ok!(::withdraw_unbonded(10, 0)); + + // ensure withdrawing all amount cleans up storage. + assert_eq!(Staking::ledger(10.into()), Err(Error::::NotStash)); + assert_eq!(VirtualStakers::::contains_key(10), false); + }) + } + + #[test] + fn virtual_staker_cannot_pay_reward_to_self_account() { + ExtBuilder::default().build_and_execute(|| { + // cannot set payee to self + assert_noop!( + ::virtual_bond(&10, 100, &10), + Error::::RewardDestinationRestricted + ); + + // to another account works + assert_ok!(::virtual_bond(&10, 100, &11)); + + // cannot set via set_payee as well. + assert_noop!( + ::update_payee(&10, &10), + Error::::RewardDestinationRestricted + ); + }); + } + + #[test] + fn virtual_staker_cannot_bond_again() { + ExtBuilder::default().build_and_execute(|| { + // 200 virtual bonds + bond_virtual_nominator(200, 201, 500, vec![11, 21]); + + // Tries bonding again + assert_noop!( + ::virtual_bond(&200, 200, &201), + Error::::AlreadyBonded + ); + + // And again with a different reward destination. + assert_noop!( + ::virtual_bond(&200, 200, &202), + Error::::AlreadyBonded + ); + + // Direct bond is not allowed as well. + assert_noop!( + ::bond(&200, 200, &202), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn normal_staker_cannot_virtual_bond() { + ExtBuilder::default().build_and_execute(|| { + // 101 is a nominator trying to virtual bond + assert_noop!( + ::virtual_bond(&101, 200, &102), + Error::::AlreadyBonded + ); + + // validator 21 tries to virtual bond + assert_noop!( + ::virtual_bond(&21, 200, &22), + Error::::AlreadyBonded + ); + }); + } + + #[test] + fn migrate_virtual_staker() { + ExtBuilder::default().build_and_execute(|| { + // give some balance to 200 + Balances::make_free_balance_be(&200, 2000); + + // stake + assert_ok!(Staking::bond(RuntimeOrigin::signed(200), 1000, RewardDestination::Staked)); + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 1000); + + // migrate them to virtual staker + ::migrate_to_virtual_staker(&200); + // payee needs to be updated to a non-stash account. + assert_ok!(::update_payee(&200, &201)); + + // ensure the balance is not locked anymore + assert_eq!(Balances::balance_locked(crate::STAKING_ID, &200), 0); + + // and they are marked as virtual stakers + assert_eq!(Pallet::::is_virtual_staker(&200), true); + }); + } + + #[test] + fn virtual_nominators_are_lazily_slashed() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. 
+ assert_ok!(::update_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); + + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); + + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + + // validator balance is slashed as usual + assert_eq!(balances(&11).0, validator_balance - validator_share); + // Because slashing happened. + assert!(is_disabled(11)); + + // but virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // but slash is broadcasted to slash observers. + assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) + } +} mod ledger { use super::*; @@ -7327,7 +7547,6 @@ mod ledger { mod ledger_recovery { use super::*; - use frame_support::traits::InspectLockableCurrency; #[test] fn inspect_recovery_ledger_simple_works() { diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 11b7ef41b9a7..ad6cc6e2f4ff 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -29,7 +29,7 @@ use core::ops::Sub; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Zero}, - DispatchError, DispatchResult, RuntimeDebug, Saturating, + DispatchError, DispatchResult, Perbill, RuntimeDebug, Saturating, }; pub mod offence; @@ -254,6 +254,9 @@ pub trait StakingInterface { /// schedules have reached their unlocking era should allow more calls to this function. fn unbond(stash: &Self::AccountId, value: Self::Balance) -> DispatchResult; + /// Update the reward destination for the ledger associated with the stash. + fn update_payee(stash: &Self::AccountId, reward_acc: &Self::AccountId) -> DispatchResult; + /// Unlock any funds schedule to unlock before or at the current era. /// /// Returns whether the stash was killed because of this withdraw or not. @@ -274,7 +277,7 @@ pub trait StakingInterface { /// Checks whether an account `staker` has been exposed in an era. fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool; - /// Return the status of the given staker, `None` if not staked at all. + /// Return the status of the given staker, `Err` if not staked at all. fn status(who: &Self::AccountId) -> Result, DispatchError>; /// Checks whether or not this is a validator account. @@ -290,6 +293,9 @@ pub trait StakingInterface { } } + /// Returns the fraction of the slash to be rewarded to reporter. 
+	fn slash_reward_fraction() -> Perbill;
+
 	#[cfg(feature = "runtime-benchmarks")]
 	fn max_exposure_page_size() -> Page;
@@ -304,6 +310,34 @@ pub trait StakingInterface {
 	fn set_current_era(era: EraIndex);
 }

+/// A set of low-level APIs to manipulate the staking ledger.
+///
+/// These APIs bypass some or all safety checks and should only be used if you know what you are
+/// doing.
+pub trait StakingUnchecked: StakingInterface {
+	/// Migrate an existing staker to a virtual staker.
+	///
+	/// It would release all funds held by the implementation pallet.
+	fn migrate_to_virtual_staker(who: &Self::AccountId);
+
+	/// Book-keep a new bond for `keyless_who` without applying any locks (hence virtual).
+	///
+	/// It is important that `keyless_who` is a keyless account and therefore cannot interact with
+	/// the staking pallet directly. The caller is responsible for ensuring the passed amount is
+	/// locked and valid.
+	fn virtual_bond(
+		keyless_who: &Self::AccountId,
+		value: Self::Balance,
+		payee: &Self::AccountId,
+	) -> DispatchResult;
+
+	/// Migrate a virtual staker to a direct staker.
+	///
+	/// Only used for testing.
+	#[cfg(feature = "runtime-benchmarks")]
+	fn migrate_to_direct_staker(who: &Self::AccountId);
+}
+
 /// The amount of exposure for an era that an individual nominator has (susceptible to slashing).
 #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
 pub struct IndividualExposure<AccountId, Balance: HasCompact> {

From f3c3ebb6a99295816ac4ee0a26364d736094c147 Mon Sep 17 00:00:00 2001
From: gui
Date: Sat, 20 Apr 2024 17:20:35 +0900
Subject: [PATCH 26/74] Fix case in type in macro generation (#4223)

The generated type is not camel case, which generates warnings in the IDE.

Label should be R0.

---
 substrate/frame/support/procedural/src/runtime/expand/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/frame/support/procedural/src/runtime/expand/mod.rs b/substrate/frame/support/procedural/src/runtime/expand/mod.rs
index 93c88fce94b7..011f69f37147 100644
--- a/substrate/frame/support/procedural/src/runtime/expand/mod.rs
+++ b/substrate/frame/support/procedural/src/runtime/expand/mod.rs
@@ -244,7 +244,7 @@ fn construct_runtime_final_expansion(

 		// Prevent UncheckedExtrinsic to print unused warning.
const _: () = { #[allow(unused)] - type __hidden_use_of_unchecked_extrinsic = #unchecked_extrinsic; + type __HiddenUseOfUncheckedExtrinsic = #unchecked_extrinsic; }; #[derive( From 253778c94dd64e6bc174ed1e03ac7e0b43990129 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Mon, 22 Apr 2024 15:08:38 +1000 Subject: [PATCH 27/74] ci: disallow westend migration failure (#4205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- .gitlab/pipeline/check.yml | 1 - polkadot/runtime/westend/src/lib.rs | 30 +---------------------------- 2 files changed, 1 insertion(+), 30 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 89b2c00db9b2..6fb8a97fe958 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -132,7 +132,6 @@ check-runtime-migration-westend: WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" - allow_failure: true check-runtime-migration-rococo: stage: check diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a06a1e1f7fc8..02933efff944 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1646,36 +1646,8 @@ pub mod migrations { } } - // We don't have a limit in the Relay Chain. - const IDENTITY_MIGRATION_KEY_LIMIT: u64 = u64::MAX; - /// Unreleased migrations. Add new ones here: - pub type Unreleased = ( - parachains_configuration::migration::v7::MigrateToV7, - pallet_staking::migrations::v14::MigrateToV14, - assigned_slots::migration::v1::MigrateToV1, - parachains_scheduler::migration::MigrateV1ToV2, - parachains_configuration::migration::v8::MigrateToV8, - parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::MigrateToV1, - pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_grandpa::migrations::MigrateV4ToV5, - parachains_configuration::migration::v10::MigrateToV10, - pallet_nomination_pools::migration::unversioned::TotalValueLockedSync, - // Migrate Identity pallet for Usernames - pallet_identity::migration::versioned::V0ToV1, - parachains_configuration::migration::v11::MigrateToV11, - parachains_configuration::migration::v12::MigrateToV12, - // permanent - pallet_xcm::migration::MigrateToLatestXcmVersion, - // Migrate from legacy lease to coretime. Needs to run after configuration v11 - coretime::migration::MigrateToCoretime< - Runtime, - crate::xcm_config::XcmRouter, - GetLegacyLeaseImpl, - >, - parachains_inclusion::migration::MigrateToV1, - ); + pub type Unreleased = (); } /// Unchecked extrinsic type as expected by this runtime. From e0202ece6390ad216e0bec455ee8ab47925d9caf Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 22 Apr 2024 13:26:32 +0200 Subject: [PATCH 28/74] [xcm] Assets: sort after `prepend_with` (#4235) Adds sorting to the XCM Assets' `prepend_with`, which could modify the order of `AssetId` locations. 
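For context, a condensed illustration of the reordering problem, distilled from the
`prepend_preserves_sorting` tests added below (v4 syntax; the asset names follow the tests):

```rust
// `Assets` keeps its items sorted. Before prepending (already sorted):
//   asset_1: (parents: 0, [PalletInstance(50), GeneralIndex(1)])
//   asset_2: (parents: 1, [PalletInstance(50), GeneralIndex(1)])
//   asset_3: (parents: 2, [PalletInstance(50), GeneralIndex(1)])
let prefix = Location::new(0, [Parachain(1000)]);
// After `prepend_with(&prefix)`, asset_2's parent is consumed by the prefix:
//   asset_1 -> (0, [Parachain(1000), PalletInstance(50), GeneralIndex(1)])
//   asset_2 -> (0, [PalletInstance(50), GeneralIndex(1)])
//   asset_3 -> (1, [PalletInstance(50), GeneralIndex(1)])
// asset_2's new location now sorts before asset_1's, so without the
// `self.0.sort()` added here, the sorted-and-deduplicated invariant that
// decoding enforces would be violated.
```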
Relates to: https://github.com/paritytech/polkadot-sdk/pull/4186 (the same fix for `reanchored`) Part of: https://github.com/paritytech/polkadot-sdk/pull/2129 --- polkadot/xcm/src/v3/multiasset.rs | 71 ++++++++++++++++++++++++++++++- polkadot/xcm/src/v4/asset.rs | 59 ++++++++++++++++++++++++- 2 files changed, 126 insertions(+), 4 deletions(-) diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index f9041ecd81ba..0662077b19d0 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -825,7 +825,9 @@ impl MultiAssets { /// Prepend a `MultiLocation` to any concrete asset items, giving it a new root location. pub fn prepend_with(&mut self, prefix: &MultiLocation) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix)) + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) } /// Mutate the location of the asset identifier if concrete, giving it the same location @@ -1213,8 +1215,73 @@ mod tests { vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + assert!(assets.reanchor(&dest, reanchor_context).is_ok()); - assert_eq!(assets, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored].into()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + } + + #[test] + fn prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = MultiLocation::new(0, X1(Parachain(1000))); + + let asset_1: MultiAsset = + (MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (MultiLocation::new(0, X3(Parachain(1000), PalletInstance(50), GeneralIndex(1))), 10) + .into() + ); + + let asset_2: MultiAsset = + (MultiLocation::new(1, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (MultiLocation::new(0, X2(PalletInstance(50), GeneralIndex(1))), 10).into() + ); + + let asset_3: MultiAsset = + (MultiLocation::new(2, X2(PalletInstance(50), GeneralIndex(1))), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (MultiLocation::new(1, X2(PalletInstance(50), GeneralIndex(1))), 10).into() + ); + + // `From` impl does sorting. 
+ let mut assets: MultiAssets = vec![asset_1, asset_2, asset_3].into(); + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); + + // let's do `prepend_with` + assert!(assets.prepend_with(&prefix).is_ok()); + assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]); + + // decoding respects limits and sorting + assert!(assets + .using_encoded(|mut enc| MultiAssets::decode(&mut enc).map(|_| ())) + .is_ok()); } #[test] diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index bdff0c272306..8abd8f9f8fd0 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -723,7 +723,9 @@ impl Assets { /// Prepend a `Location` to any concrete asset items, giving it a new root location. pub fn prepend_with(&mut self, prefix: &Location) -> Result<(), ()> { - self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix)) + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) } /// Return a reference to an item at a specific index or `None` if it doesn't exist. @@ -1035,8 +1037,61 @@ mod tests { let mut assets: Assets = vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + assert!(assets.reanchor(&dest, &reanchor_context).is_ok()); - assert_eq!(assets, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored].into()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = Location::new(0, [Parachain(1000)]); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (Location::new(0, [Parachain(1000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_3: Asset = (Location::new(2, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + // `From` impl does sorting. 
+		let mut assets: Assets = vec![asset_1, asset_2, asset_3].into();
+		// decoding respects limits and sorting
+		assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok());
+
+		// let's do `prepend_with`
+		assert!(assets.prepend_with(&prefix).is_ok());
+		assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]);
+
+		// decoding respects limits and sorting
+		assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok());
 	}

 	#[test]

From ff7e2c88a460a0f9df26eb3f33d1c37f72508580 Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Mon, 22 Apr 2024 13:34:04 +0200
Subject: [PATCH 29/74] Sanitize `UniversalLocation` with `GlobalConsensus` +
 XCM small nits and improvements (#4238)

This PR:
- sanitizes all `UniversalLocation`s with `GlobalConsensus` (when possible) - addressing
  [comment](https://github.com/paritytech/polkadot-sdk/pull/4025#discussion_r1557361473)
- adds `DefaultConfig` for `pallet-xcm-benchmarks` for `system`

---
 Cargo.lock                                    |  1 -
 .../contracts-rococo/src/xcm_config.rs        |  4 +--
 .../glutton/glutton-westend/src/xcm_config.rs |  4 +--
 .../runtimes/starters/shell/src/xcm_config.rs |  4 +--
 .../testing/rococo-parachain/src/lib.rs       |  4 +--
 polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml |  1 -
 .../src/fungible/mock.rs                      | 32 +------------------
 .../pallet-xcm-benchmarks/src/generic/mock.rs | 31 +-----------------
 .../xcm/pallet-xcm-benchmarks/src/mock.rs     |  2 +-
 polkadot/xcm/pallet-xcm/src/mock.rs           |  2 +-
 .../xcm/xcm-builder/src/universal_exports.rs  |  4 +--
 polkadot/xcm/xcm-builder/tests/mock/mod.rs    |  2 +-
 .../xcm-simulator/example/src/parachain.rs    |  2 +-
 .../xcm-simulator/example/src/relay_chain.rs  |  2 +-
 .../xcm/xcm-simulator/fuzzer/src/parachain.rs |  2 +-
 .../xcm-simulator/fuzzer/src/relay_chain.rs   |  2 +-
 .../contracts/mock-network/src/parachain.rs   |  2 +-
 .../contracts/mock-network/src/relay_chain.rs |  2 +-
 .../runtime/src/configs/xcm_config.rs         |  2 ++
 19 files changed, 23 insertions(+), 82 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 951f2548d34d..c932927a905e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11777,7 +11777,6 @@ dependencies = [
 "polkadot-primitives",
 "polkadot-runtime-common",
 "scale-info",
- "sp-core",
 "sp-io",
 "sp-runtime",
 "sp-std 14.0.0",

diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
index 46fcbc6319c9..9132b4e17602 100644
--- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs
@@ -51,9 +51,9 @@ use xcm_executor::XcmExecutor;

 parameter_types!
{ pub const RelayLocation: Location = Location::parent(); - pub const RelayNetwork: Option = None; + pub const RelayNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorLocation = Parachain(ParachainInfo::parachain_id().into()).into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub const ExecutiveBody: BodyId = BodyId::Executive; pub TreasuryAccount: AccountId = TREASURY_PALLET_ID.into_account_truncating(); pub RelayTreasuryLocation: Location = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs index 15bb519e115c..9d438a41f8fe 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -30,8 +30,8 @@ use xcm_builder::{ parameter_types! { pub const WestendLocation: Location = Location::parent(); - pub const WestendNetwork: Option = Some(NetworkId::Westend); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub const WestendNetwork: NetworkId = NetworkId::Westend; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(WestendNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); } /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, diff --git a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs index df89158729cd..7f9de0f64b35 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/xcm_config.rs @@ -30,8 +30,8 @@ use xcm_builder::{ parameter_types! { pub const RococoLocation: Location = Location::parent(); - pub const RococoNetwork: Option = Some(NetworkId::Rococo); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub const RococoNetwork: NetworkId = NetworkId::Rococo; + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); } /// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index df335368be1c..0ae93d1577ce 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -327,9 +327,9 @@ impl cumulus_pallet_aura_ext::Config for Runtime {} parameter_types! 
{ pub const RocLocation: Location = Location::parent(); - pub const RococoNetwork: Option = Some(NetworkId::Rococo); + pub const RococoNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorLocation = [Parachain(ParachainInfo::parachain_id().into())].into(); + pub UniversalLocation: InteriorLocation = [GlobalConsensus(RococoNetwork::get()), Parachain(ParachainInfo::parachain_id().into())].into(); pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 8c71426a6fae..9691ddd48168 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -29,7 +29,6 @@ log = { workspace = true, default-features = true } [dev-dependencies] pallet-balances = { path = "../../../substrate/frame/balances" } pallet-assets = { path = "../../../substrate/frame/assets" } -sp-core = { path = "../../../substrate/primitives/core" } sp-tracing = { path = "../../../substrate/primitives/tracing" } xcm = { package = "staging-xcm", path = ".." } # temp diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index c831cd024659..d11f64e74944 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -20,11 +20,8 @@ use crate::{fungible as xcm_balances_benchmark, mock::*}; use frame_benchmarking::BenchmarkError; use frame_support::{ derive_impl, parameter_types, - traits::{ConstU32, Everything, Nothing}, - weights::Weight, + traits::{Everything, Nothing}, }; -use sp_core::H256; -use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use xcm::latest::prelude::*; use xcm_builder::{AllowUnpaidExecutionFrom, FrameTransactionalProcessor, MintLocation}; @@ -40,37 +37,10 @@ frame_support::construct_runtime!( } ); -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); -} - #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type Hash = H256; - type RuntimeCall = RuntimeCall; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! 
diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs
index 534f7d85ea2e..f41df017b9db 100644
--- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs
+++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs
@@ -21,10 +21,8 @@ use codec::Decode;
 use frame_support::{
 	derive_impl, parameter_types,
 	traits::{Contains, Everything, OriginTrait},
-	weights::Weight,
 };
-use sp_core::H256;
-use sp_runtime::traits::{BlakeTwo256, IdentityLookup, TrailingZeroInput};
+use sp_runtime::traits::TrailingZeroInput;
 use xcm_builder::{
 	test_utils::{
 		AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap,
@@ -45,37 +43,10 @@ frame_support::construct_runtime!(
 	}
 );
 
-parameter_types! {
-	pub const BlockHashCount: u64 = 250;
-	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX));
-}
-
 #[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
 impl frame_system::Config for Test {
-	type BaseCallFilter = Everything;
-	type BlockWeights = ();
-	type BlockLength = ();
-	type DbWeight = ();
-	type RuntimeOrigin = RuntimeOrigin;
-	type Nonce = u64;
-	type Hash = H256;
-	type RuntimeCall = RuntimeCall;
-	type Hashing = BlakeTwo256;
-	type AccountId = u64;
-	type Lookup = IdentityLookup<Self::AccountId>;
 	type Block = Block;
-	type RuntimeEvent = RuntimeEvent;
-	type BlockHashCount = BlockHashCount;
-	type Version = ();
-	type PalletInfo = PalletInfo;
 	type AccountData = pallet_balances::AccountData<Balance>;
-	type OnNewAccount = ();
-	type OnKilledAccount = ();
-	type SystemWeightInfo = ();
-	type SS58Prefix = ();
-	type OnSetCode = ();
-	type MaxConsumers = frame_support::traits::ConstU32<16>;
 }
 
 /// The benchmarks in this pallet should never need an asset transactor to begin with.
diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs
index 78a9e5f8a018..be3af5d4a3f3 100644
--- a/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm-benchmarks/src/mock.rs
@@ -58,7 +58,7 @@ impl xcm_executor::traits::ConvertLocation<AccountId> for AccountIdConverter
 }
 
 parameter_types! {
-	pub UniversalLocation: InteriorLocation = Junction::Parachain(101).into();
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(ByGenesis([1; 32])), Junction::Parachain(101)].into();
 	pub UnitWeightCost: Weight = Weight::from_parts(10, 10);
 	pub WeightPrice: (AssetId, u128, u128) = (AssetId(Here.into()), 1_000_000, 1024);
 }
diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs
index 2cc228476ba8..e3680c530e24 100644
--- a/polkadot/xcm/pallet-xcm/src/mock.rs
+++ b/polkadot/xcm/pallet-xcm/src/mock.rs
@@ -413,7 +413,7 @@ parameter_types! {
 		)),
 	};
 	pub const AnyNetwork: Option<NetworkId> = None;
-	pub UniversalLocation: InteriorLocation = Here;
+	pub UniversalLocation: InteriorLocation = GlobalConsensus(ByGenesis([0; 32])).into();
 	pub UnitWeightCost: u64 = 1_000;
 	pub CheckingAccount: AccountId = XcmPallet::check_account();
 }
diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs
index 6e031cdbc270..d0e3ef3032ea 100644
--- a/polkadot/xcm/xcm-builder/src/universal_exports.rs
+++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs
@@ -187,7 +187,7 @@ pub fn forward_id_for(original_id: &XcmHash) -> XcmHash {
 /// end with the `SetTopic` instruction.
 ///
 /// In the case that the message ends with a `SetTopic(T)` (as should be the case if the top-level
-/// router is `EnsureUniqueTopic`), then the forwarding message (i.e. the one carrying the
+/// router is `WithUniqueTopic`), then the forwarding message (i.e. the one carrying the
 /// export instruction *to* the bridge in local consensus) will also end with a `SetTopic` whose
 /// inner is `forward_id_for(T)`. If this is not the case then the onward message will not be given
 /// the `SetTopic` afterword.
@@ -254,7 +254,7 @@ impl = None;
-	pub UniversalLocation: InteriorLocation = Here;
+	pub UniversalLocation: InteriorLocation = RelayNetwork::get().into();
 	pub UnitWeightCost: u64 = 1_000;
 }
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
index cadfc1e7200c..d8d65fbf0ce7 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs
@@ -101,7 +101,7 @@ parameter_types! {
 
 parameter_types! {
 	pub const KsmLocation: Location = Location::parent();
 	pub const RelayNetwork: NetworkId = NetworkId::Kusama;
-	pub UniversalLocation: InteriorLocation = Parachain(MsgQueue::parachain_id().into()).into();
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into();
 }
 
 pub type LocationToAccountId = (
diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs
index 6790b535d169..47209b765d15 100644
--- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs
+++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs
@@ -104,7 +104,7 @@ parameter_types! {
 	pub const TokenLocation: Location = Here.into_location();
 	pub const ThisNetwork: NetworkId = NetworkId::ByGenesis([0; 32]);
 	pub const AnyNetwork: Option<NetworkId> = None;
-	pub const UniversalLocation: InteriorLocation = Here;
+	pub UniversalLocation: InteriorLocation = ThisNetwork::get().into();
 }
 
 pub type SovereignAccountOf =
diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs
index d4ad47581d16..843efab1502e 100644
--- a/substrate/frame/contracts/mock-network/src/parachain.rs
+++ b/substrate/frame/contracts/mock-network/src/parachain.rs
@@ -144,7 +144,7 @@ parameter_types! {
 	pub const KsmLocation: Location = Location::parent();
 	pub const TokenLocation: Location = Here.into_location();
 	pub const RelayNetwork: NetworkId = ByGenesis([0; 32]);
-	pub UniversalLocation: InteriorLocation = Parachain(MsgQueue::parachain_id().into()).into();
+	pub UniversalLocation: InteriorLocation = [GlobalConsensus(RelayNetwork::get()), Parachain(MsgQueue::parachain_id().into())].into();
 }
 
 pub type XcmOriginToCallOrigin = (
diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs
index 470304ed357e..d5e0ec9c83fa 100644
--- a/substrate/frame/contracts/mock-network/src/relay_chain.rs
+++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs
@@ -107,7 +107,7 @@ impl configuration::Config for Runtime {
 
 parameter_types! {
 	pub RelayNetwork: NetworkId = ByGenesis([0; 32]);
 	pub const TokenLocation: Location = Here.into_location();
-	pub UniversalLocation: InteriorLocation = Here;
+	pub UniversalLocation: InteriorLocation = RelayNetwork::get().into();
 	pub UnitWeightCost: u64 = 1_000;
 }
diff --git a/templates/parachain/runtime/src/configs/xcm_config.rs b/templates/parachain/runtime/src/configs/xcm_config.rs
index 13da2363b053..c6b6e8da1b89 100644
--- a/templates/parachain/runtime/src/configs/xcm_config.rs
+++ b/templates/parachain/runtime/src/configs/xcm_config.rs
@@ -26,6 +26,8 @@ parameter_types! {
 	pub const RelayLocation: Location = Location::parent();
 	pub const RelayNetwork: Option<NetworkId> = None;
 	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
+	// For the real deployment, it is recommended to set `RelayNetwork` according to the relay chain
+	// and prepend `UniversalLocation` with `GlobalConsensus(RelayNetwork::get())`.
 	pub UniversalLocation: InteriorLocation = Parachain(ParachainInfo::parachain_id().into()).into();
 }
 
From 921265ca7889b9c9bc615af0eced9c6918c8af9f Mon Sep 17 00:00:00 2001
From: Svyatoslav Nikolsky
Date: Mon, 22 Apr 2024 15:06:16 +0300
Subject: [PATCH 30/74] Added prdoc for 4208 (#4239)

---
 prdoc/pr_4208.prdoc | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 prdoc/pr_4208.prdoc

diff --git a/prdoc/pr_4208.prdoc b/prdoc/pr_4208.prdoc
new file mode 100644
index 000000000000..be2a1b084a5f
--- /dev/null
+++ b/prdoc/pr_4208.prdoc
@@ -0,0 +1,14 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Fixed GrandpaConsensusLogReader::find_scheduled_change
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      This PR fixes an issue with the search for the authority set change
+      digest item in the bridges code. The issue occurred when a single header
+      digest carried multiple consensus digest items.
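To make the fixed behaviour concrete, here is a minimal sketch of a digest search that tolerates several consensus digest items in one header. It is an illustration built on the standard `sp_consensus_grandpa` and `sp_runtime` types, not the actual `bp-header-chain` implementation, and the function signature is assumed:

```rust
use codec::{Codec, Decode};
use sp_consensus_grandpa::{ConsensusLog, ScheduledChange, GRANDPA_ENGINE_ID};
use sp_runtime::{generic::Digest, DigestItem};

/// Scan every `Consensus` digest item for a GRANDPA scheduled change instead
/// of inspecting only the first one, so a header that also carries another
/// engine's consensus digest (e.g. BEEFY) is still handled correctly.
fn find_scheduled_change<N: Codec>(digest: &Digest) -> Option<ScheduledChange<N>> {
	digest.logs.iter().find_map(|item| match item {
		DigestItem::Consensus(id, data) if *id == GRANDPA_ENGINE_ID =>
			match ConsensusLog::<N>::decode(&mut &data[..]) {
				Ok(ConsensusLog::ScheduledChange(change)) => Some(change),
				_ => None,
			},
		_ => None,
	})
}
```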
+ +crates: + - name: bp-header-chain From a2a049db2bd669a88f6ab410b22b780ebcc8baee Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 22 Apr 2024 14:45:54 +0200 Subject: [PATCH 31/74] [subsystem-benchmark] Add approval-voting benchmark to CI (#4216) Co-authored-by: alvicsam --- .gitlab/pipeline/publish.yml | 4 + .gitlab/pipeline/test.yml | 30 +++--- Cargo.lock | 1 + polkadot/node/core/approval-voting/Cargo.toml | 11 +++ .../approval-voting-regression-bench.rs | 94 +++++++++++++++++++ 5 files changed, 125 insertions(+), 15 deletions(-) create mode 100644 polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index d8f5d5832291..68712610ad23 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -74,6 +74,8 @@ publish-subsystem-benchmarks: artifacts: true - job: subsystem-benchmark-availability-distribution artifacts: true + - job: subsystem-benchmark-approval-voting + artifacts: true - job: publish-rustdoc artifacts: false script: @@ -115,6 +117,8 @@ trigger_workflow: artifacts: true - job: subsystem-benchmark-availability-distribution artifacts: true + - job: subsystem-benchmark-approval-voting + artifacts: true script: - echo "Triggering workflow" - > diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 1d6efd7b9fd1..c17a3ce35eaf 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -511,7 +511,7 @@ test-syscalls: fi allow_failure: false # this rarely triggers in practice -subsystem-benchmark-availability-recovery: +.subsystem-benchmark-template: stage: test artifacts: name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" @@ -523,26 +523,26 @@ subsystem-benchmark-availability-recovery: - .docker-env - .common-refs - .run-immediately - script: - - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks tags: - benchmark + +subsystem-benchmark-availability-recovery: + extends: + - .subsystem-benchmark-template + script: + - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks allow_failure: true subsystem-benchmark-availability-distribution: - stage: test - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: always - expire_in: 1 hour - paths: - - charts/ extends: - - .docker-env - - .common-refs - - .run-immediately + - .subsystem-benchmark-template script: - cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks - tags: - - benchmark + allow_failure: true + +subsystem-benchmark-approval-voting: + extends: + - .subsystem-benchmark-template + script: + - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks allow_failure: true diff --git a/Cargo.lock b/Cargo.lock index c932927a905e..fa5c42c1fa32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13017,6 +13017,7 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "polkadot-primitives-test-helpers", + "polkadot-subsystem-bench", "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index ced7706c40a2..473bc67923b6 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -53,3 +53,14 @@ kvdb-memorydb = "0.13.0" test-helpers = 
{ package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } log = { workspace = true, default-features = true } env_logger = "0.11" + +polkadot-subsystem-bench = { path = "../../subsystem-bench" } + +[[bench]] +name = "approval-voting-regression-bench" +path = "benches/approval-voting-regression-bench.rs" +harness = false +required-features = ["subsystem-benchmarks"] + +[features] +subsystem-benchmarks = [] diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs new file mode 100644 index 000000000000..cad45dc64d2e --- /dev/null +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -0,0 +1,94 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! approval-voting throughput test +//! +//! Approval Voting benchmark based on Kusama parameters and scale. +//! +//! Subsystems involved: +//! - approval-distribution +//! - approval-voting + +use polkadot_subsystem_bench::{ + self, + approval::{bench_approvals, prepare_test, ApprovalsOptions}, + configuration::TestConfiguration, + usage::BenchmarkUsage, + utils::save_to_file, +}; +use std::io::Write; + +const BENCH_COUNT: usize = 10; + +fn main() -> Result<(), String> { + let mut messages = vec![]; + let mut config = TestConfiguration::default(); + config.n_cores = 100; + config.n_validators = 500; + config.num_blocks = 10; + config.peer_bandwidth = 524288000000; + config.bandwidth = 524288000000; + config.latency = None; + config.connectivity = 100; + config.generate_pov_sizes(); + let options = ApprovalsOptions { + last_considered_tranche: 89, + coalesce_mean: 3.0, + coalesce_std_dev: 1.0, + coalesce_tranche_diff: 12, + enable_assignments_v2: true, + stop_when_approved: false, + workdir_prefix: "/tmp".to_string(), + num_no_shows_per_candidate: 0, + }; + + println!("Benchmarking..."); + let usages: Vec = (0..BENCH_COUNT) + .map(|n| { + print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n)); + std::io::stdout().flush().unwrap(); + let (mut env, state) = prepare_test(config.clone(), options.clone(), false); + env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state)) + }) + .collect(); + println!("\rDone!{}", " ".repeat(BENCH_COUNT)); + + let average_usage = BenchmarkUsage::average(&usages); + save_to_file( + "charts/availability-distribution-regression-bench.json", + average_usage.to_chart_json().map_err(|e| e.to_string())?, + ) + .map_err(|e| e.to_string())?; + println!("{}", average_usage); + + // We expect no variance for received and sent + // but use 0.001 because we operate with floats + messages.extend(average_usage.check_network_usage(&[ + ("Received from peers", 52944.7000, 0.001), + ("Sent to peers", 63532.2000, 0.001), + ])); + messages.extend(average_usage.check_cpu_usage(&[ + 
("approval-distribution", 7.7883, 0.1), + ("approval-voting", 10.4655, 0.1), + ])); + + if messages.is_empty() { + Ok(()) + } else { + eprintln!("{}", messages.join("\n")); + Err("Regressions found".to_string()) + } +} From fb8b64e32b0d59e0e50265fd213837a971895d37 Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Mon, 22 Apr 2024 18:16:42 +0200 Subject: [PATCH 32/74] [subsystem-benchmark] Fix results filename for approval-voting benches (#4243) --- .../approval-voting/benches/approval-voting-regression-bench.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index cad45dc64d2e..7157362a79c7 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -68,7 +68,7 @@ fn main() -> Result<(), String> { let average_usage = BenchmarkUsage::average(&usages); save_to_file( - "charts/availability-distribution-regression-bench.json", + "charts/approval-voting-regression-bench.json", average_usage.to_chart_json().map_err(|e| e.to_string())?, ) .map_err(|e| e.to_string())?; From 3380e21cd92690c2066f686164a954ba7cd17244 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 22 Apr 2024 18:34:29 +0200 Subject: [PATCH 33/74] Use default branch of `psvm` when synchronizing templates (#4240) We cannot lock to a specific version of `psvm`, because we will need to keep it up-to-date - each release currently requires a change in `psvm` such as [this one](https://github.com/paritytech/psvm/pull/2/files). There is no `stable` branch in `psvm` repo or anything so using the default branch. --- .github/workflows/sync-templates.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-templates.yml b/.github/workflows/sync-templates.yml index 511c9d0e8cd0..3617d6c34a3e 100644 --- a/.github/workflows/sync-templates.yml +++ b/.github/workflows/sync-templates.yml @@ -61,7 +61,7 @@ jobs: - name: Install toml-cli run: cargo install --git https://github.com/gnprice/toml-cli --rev ea69e9d2ca4f0f858110dc7a5ae28bcb918c07fb # v0.2.3 - name: Install Polkadot SDK Version Manager - run: cargo install --git https://github.com/paritytech/psvm --rev c41261ffb52ab0c115adbbdb17e2cb7900d2bdfd psvm # master + run: cargo install --git https://github.com/paritytech/psvm psvm - name: Rust compilation prerequisites run: | sudo apt update From bd9287f766bded2022036a63d12fb86a2f7174a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 22 Apr 2024 21:28:27 +0200 Subject: [PATCH 34/74] wasm-builder: Make it easier to build a WASM binary (#4177) Basically combines all the recommended calls into one `build_using_defaults()` call or `init_with_defaults()` when there are some custom changes required. 
--- .../runtimes/assets/asset-hub-rococo/build.rs | 6 +--- .../assets/asset-hub-westend/build.rs | 6 +--- .../collectives/collectives-westend/build.rs | 6 +--- .../contracts/contracts-rococo/build.rs | 6 +--- .../runtimes/glutton/glutton-westend/build.rs | 6 +--- .../runtimes/testing/penpal/build.rs | 6 +--- .../testing/rococo-parachain/build.rs | 6 +--- cumulus/test/runtime/build.rs | 10 ++---- polkadot/runtime/rococo/build.rs | 11 ++----- polkadot/runtime/test-runtime/build.rs | 6 +--- polkadot/runtime/westend/build.rs | 6 +--- prdoc/pr_4177.prdoc | 12 +++++++ substrate/utils/wasm-builder/src/builder.rs | 33 +++++++++++++++++++ substrate/utils/wasm-builder/src/lib.rs | 12 ++----- templates/minimal/runtime/build.rs | 6 +--- templates/parachain/runtime/build.rs | 6 +--- templates/solochain/runtime/build.rs | 6 +--- 17 files changed, 64 insertions(+), 86 deletions(-) create mode 100644 prdoc/pr_4177.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs index 60f8a125129f..239ccac19ec7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs index 60f8a125129f..239ccac19ec7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs index 60f8a125129f..239ccac19ec7 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs index 60f8a125129f..239ccac19ec7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs index 1580e6f07bec..2f311357403c 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs +++ 
b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs @@ -16,9 +16,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + WasmBuilder::build_using_defaults(); } diff --git a/cumulus/parachains/runtimes/testing/penpal/build.rs b/cumulus/parachains/runtimes/testing/penpal/build.rs index 9c9cde9a25a1..c2fa89aa7028 100644 --- a/cumulus/parachains/runtimes/testing/penpal/build.rs +++ b/cumulus/parachains/runtimes/testing/penpal/build.rs @@ -16,11 +16,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs index 60f8a125129f..239ccac19ec7 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/build.rs @@ -15,11 +15,7 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } #[cfg(not(feature = "std"))] diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 5e5f6a35a505..ebd5c178cba0 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -18,16 +18,10 @@ fn main() { use substrate_wasm_builder::WasmBuilder; - WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + WasmBuilder::build_using_defaults(); - WasmBuilder::new() - .with_current_project() + WasmBuilder::init_with_defaults() .enable_feature("increment-spec-version") - .import_memory() .set_file_name("wasm_binary_spec_version_incremented.rs") .build(); } diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index 0b7ee77b0d0d..403c31ff21c7 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -16,18 +16,11 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() + substrate_wasm_builder::WasmBuilder::init_with_defaults() .set_file_name("fast_runtime_binary.rs") .enable_feature("fast-runtime") - .import_memory() - .export_heap_base() .build(); } diff --git a/polkadot/runtime/test-runtime/build.rs b/polkadot/runtime/test-runtime/build.rs index 404ba3f2fdbd..caf24317d0b3 100644 --- a/polkadot/runtime/test-runtime/build.rs +++ b/polkadot/runtime/test-runtime/build.rs @@ -17,9 +17,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() + WasmBuilder::build_using_defaults(); } diff --git a/polkadot/runtime/westend/build.rs b/polkadot/runtime/westend/build.rs index 428c971bc132..0b3e12c78c74 100644 --- a/polkadot/runtime/westend/build.rs +++ b/polkadot/runtime/westend/build.rs @@ -17,9 +17,5 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() + 
WasmBuilder::build_using_defaults(); } diff --git a/prdoc/pr_4177.prdoc b/prdoc/pr_4177.prdoc new file mode 100644 index 000000000000..29d011c93516 --- /dev/null +++ b/prdoc/pr_4177.prdoc @@ -0,0 +1,12 @@ +title: "wasm-builder: Make it easier to build a WASM binary" + +doc: + - audience: [Runtime Dev, Node Dev] + description: | + Combines all the recommended calls of the `WasmBuilder` into + `build_using_defaults()` or `init_with_defaults()` if more changes are required. + Otherwise the interface doesn't change and users can still continue to use + the "old" interface. + +crates: + - name: substrate-wasm-builder diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index d2aaff448bc5..163703fbec62 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -116,6 +116,39 @@ impl WasmBuilder { WasmBuilderSelectProject { _ignore: () } } + /// Build the WASM binary using the recommended default values. + /// + /// This is the same as calling: + /// ```no_run + /// substrate_wasm_builder::WasmBuilder::new() + /// .with_current_project() + /// .import_memory() + /// .export_heap_base() + /// .build(); + /// ``` + pub fn build_using_defaults() { + WasmBuilder::new() + .with_current_project() + .import_memory() + .export_heap_base() + .build(); + } + + /// Init the wasm builder with the recommended default values. + /// + /// In contrast to [`Self::build_using_defaults`] it does not build the WASM binary directly. + /// + /// This is the same as calling: + /// ```no_run + /// substrate_wasm_builder::WasmBuilder::new() + /// .with_current_project() + /// .import_memory() + /// .export_heap_base(); + /// ``` + pub fn init_with_defaults() -> Self { + WasmBuilder::new().with_current_project().import_memory().export_heap_base() + } + /// Enable exporting `__heap_base` as global variable in the WASM binary. /// /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 178e499e8f5b..9ebab38b9cb2 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -33,15 +33,9 @@ //! use substrate_wasm_builder::WasmBuilder; //! //! fn main() { -//! WasmBuilder::new() -//! // Tell the builder to build the project (crate) this `build.rs` is part of. -//! .with_current_project() -//! // Make sure to export the `heap_base` global, this is required by Substrate -//! .export_heap_base() -//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) -//! .import_memory() -//! // Build it. -//! .build() +//! // Builds the WASM binary using the recommended defaults. +//! // If you need more control, you can call `new` or `init_with_defaults`. +//! WasmBuilder::build_using_defaults(); //! } //! ``` //! 
diff --git a/templates/minimal/runtime/build.rs b/templates/minimal/runtime/build.rs index b7676a70dfe8..e6f92757e225 100644 --- a/templates/minimal/runtime/build.rs +++ b/templates/minimal/runtime/build.rs @@ -18,10 +18,6 @@ fn main() { #[cfg(feature = "std")] { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } } diff --git a/templates/parachain/runtime/build.rs b/templates/parachain/runtime/build.rs index 02d6973f29cf..bb05afe02b1f 100644 --- a/templates/parachain/runtime/build.rs +++ b/templates/parachain/runtime/build.rs @@ -1,10 +1,6 @@ #[cfg(feature = "std")] fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } /// The wasm builder is deactivated when compiling diff --git a/templates/solochain/runtime/build.rs b/templates/solochain/runtime/build.rs index c03d618535be..f262c320393b 100644 --- a/templates/solochain/runtime/build.rs +++ b/templates/solochain/runtime/build.rs @@ -1,10 +1,6 @@ fn main() { #[cfg(feature = "std")] { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build(); + substrate_wasm_builder::WasmBuilder::build_using_defaults(); } } From 84c294c3821baf8b81693ce6e5615b9e157b5303 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 23 Apr 2024 01:10:07 +0300 Subject: [PATCH 35/74] [testnets] remove XCM SafeCallFilter for chains using Weights::v3 (#4199) Weights::v3 also accounts for PoV weight so we no longer need the SafeCallFilter. All calls are allowed as long as they "fit in the block". --- .../assets/asset-hub-rococo/src/xcm_config.rs | 223 +---------------- .../asset-hub-westend/src/xcm_config.rs | 228 +----------------- .../bridge-hub-rococo/src/xcm_config.rs | 114 +-------- .../bridge-hub-westend/src/xcm_config.rs | 74 +----- .../collectives-westend/src/xcm_config.rs | 83 +------ .../coretime-rococo/src/xcm_config.rs | 49 +--- .../coretime-westend/src/xcm_config.rs | 48 +--- .../people/people-rococo/src/xcm_config.rs | 55 +---- .../people/people-westend/src/xcm_config.rs | 55 +---- prdoc/pr_4199.prdoc | 29 +++ 10 files changed, 56 insertions(+), 902 deletions(-) create mode 100644 prdoc/pr_4199.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index fceb82b6b06b..dbf27fb39ac5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -61,7 +61,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const TokenLocation: Location = Location::parent(); @@ -263,223 +263,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. 
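For contrast with the allow-list that the hunks below delete, a toy illustration of what switching `SafeCallFilter` to `Everything` means at the `Contains` level; `FakeCall` is a made-up stand-in for a real `RuntimeCall`:

```rust
use frame_support::traits::{Contains, Everything};

/// Made-up stand-in for a runtime call type, purely for illustration.
enum FakeCall {
	Transfer,
	SetCode,
}

/// The old pattern: an explicit allow-list filter for XCM `Transact`.
struct AllowList;
impl Contains<FakeCall> for AllowList {
	fn contains(call: &FakeCall) -> bool {
		matches!(call, FakeCall::Transfer)
	}
}

fn main() {
	// Allow-list: anything not explicitly listed is rejected.
	assert!(AllowList::contains(&FakeCall::Transfer));
	assert!(!AllowList::contains(&FakeCall::SetCode));
	// `Everything`: all calls pass the filter; execution is bounded by
	// Weights::v3 weight and PoV-size accounting instead of an allow-list.
	assert!(<Everything as Contains<FakeCall>>::contains(&FakeCall::Transfer));
	assert!(<Everything as Contains<FakeCall>>::contains(&FakeCall::SetCode));
}
```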
-pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&bridging::XcmBridgeHubRouterByteFee::key()) || - k.eq(&bridging::XcmBridgeHubRouterBaseFee::key()) || - k.eq(&bridging::to_ethereum::BridgeHubEthereumBaseFee::key()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. 
} | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::PoolAssets( - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, - ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. } | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. 
} | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToWestendXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -632,8 +415,8 @@ impl xcm_executor::Config for XcmConfig { type MessageExporter = (); type UniversalAliases = (bridging::to_westend::UniversalAliases, bridging::to_ethereum::UniversalAliases); - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 41e941ee9a2b..ed8a58af396c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -57,7 +57,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WestendLocation: Location = Location::parent(); @@ -275,228 +275,6 @@ impl Contains for AmbassadorEntities { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. 
-pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || - items - .iter() - .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. 
} | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::PoolAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, - ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. 
} | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. } | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. 
} - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -653,8 +431,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = (bridging::to_rococo::UniversalAliases,); - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 063c999aa7ad..f354ccce21fe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -19,22 +19,12 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::{ - bridge_common_config::{ - BridgeGrandpaRococoBulletinInstance, BridgeGrandpaWestendInstance, - BridgeParachainWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, - }, - bridge_to_bulletin_config::WithRococoBulletinMessagesInstance, - bridge_to_westend_config::WithBridgeHubWestendMessagesInstance, - EthereumGatewayAddress, -}; use bp_messages::LaneId; use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; use bp_runtime::ChainId; use frame_support::{ parameter_types, traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing}, - StoragePrefixedMap, }; use frame_system::EnsureRoot; use pallet_collator_selection::StakingPotAccountId; @@ -64,7 +54,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeToAccount, }; use xcm_executor::{ - traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset, WithOriginFilter}, + traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset}, XcmExecutor, }; @@ -138,104 +128,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&DeliveryRewardInBalance::key()) || - k.eq(&RequiredStakeForStakeAndSlash::key()) || - k.eq(&EthereumGatewayAddress::key()) || - // Allow resetting of Ethereum nonces in Rococo only. - k.starts_with(&snowbridge_pallet_inbound_queue::Nonce::::final_prefix()) || - k.starts_with(&snowbridge_pallet_outbound_queue::Nonce::::final_prefix()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. 
} - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWestendInstance, - >::initialize { .. }) | - RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWestendInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeWestendParachains(pallet_bridge_parachains::Call::< - Runtime, - BridgeParachainWestendInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeWestendMessages(pallet_bridge_messages::Call::< - Runtime, - WithBridgeHubWestendMessagesInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoBulletinInstance, - >::initialize { .. }) | - RuntimeCall::BridgePolkadotBulletinGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoBulletinInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgePolkadotBulletinMessages(pallet_bridge_messages::Call::< - Runtime, - WithRococoBulletinMessagesInstance, - >::set_operating_mode { .. }) | - RuntimeCall::EthereumBeaconClient( - snowbridge_pallet_ethereum_client::Call::force_checkpoint { .. } | - snowbridge_pallet_ethereum_client::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumInboundQueue( - snowbridge_pallet_inbound_queue::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumOutboundQueue( - snowbridge_pallet_outbound_queue::Call::set_operating_mode { .. }, - ) | RuntimeCall::EthereumSystem( - snowbridge_pallet_system::Call::upgrade { .. } | - snowbridge_pallet_system::Call::set_operating_mode { .. } | - snowbridge_pallet_system::Call::set_pricing_parameters { .. } | - snowbridge_pallet_system::Call::force_update_channel { .. } | - snowbridge_pallet_system::Call::force_transfer_native_from_agent { .. } | - snowbridge_pallet_system::Call::set_token_transfer_fees { .. 
}, - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -337,8 +229,8 @@ impl xcm_executor::Config for XcmConfig { crate::bridge_to_ethereum_config::SnowbridgeExporter, ); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 4870b4a52d7a..31c37c8ffab6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -19,7 +19,6 @@ use super::{ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; -use crate::bridge_common_config::{DeliveryRewardInBalance, RequiredStakeForStakeAndSlash}; use frame_support::{ parameter_types, traits::{tokens::imbalance::ResolveTo, ConstU32, Contains, Equals, Everything, Nothing}, @@ -48,7 +47,7 @@ use xcm_builder::{ WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WestendLocation: Location = Location::parent(); @@ -119,73 +118,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - // Allow to change dedicated storage items (called by governance-like) - match call { - RuntimeCall::System(frame_system::Call::set_storage { items }) - if items.iter().all(|(k, _)| { - k.eq(&DeliveryRewardInBalance::key()) | - k.eq(&RequiredStakeForStakeAndSlash::key()) - }) => - return true, - _ => (), - }; - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, - >::initialize { .. 
}) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeGrandpaRococoInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeRococoParachains(pallet_bridge_parachains::Call::< - Runtime, - crate::bridge_to_rococo_config::BridgeParachainRococoInstance, - >::set_operating_mode { .. }) | - RuntimeCall::BridgeRococoMessages(pallet_bridge_messages::Call::< - Runtime, - crate::bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >::set_operating_mode { .. }) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -265,8 +197,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter,); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 21ccd3b9cdb0..4449284b8aa8 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -44,7 +44,7 @@ use xcm_builder::{ TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const WndLocation: Location = Location::parent(); @@ -138,83 +138,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Alliance( - // `init_members` accepts unbounded vecs as arguments, - // but the call can be initiated only by root origin. - pallet_alliance::Call::init_members { .. } | - pallet_alliance::Call::vote { .. } | - pallet_alliance::Call::disband { .. 
} | - pallet_alliance::Call::set_rule { .. } | - pallet_alliance::Call::announce { .. } | - pallet_alliance::Call::remove_announcement { .. } | - pallet_alliance::Call::join_alliance { .. } | - pallet_alliance::Call::nominate_ally { .. } | - pallet_alliance::Call::elevate_ally { .. } | - pallet_alliance::Call::give_retirement_notice { .. } | - pallet_alliance::Call::retire { .. } | - pallet_alliance::Call::kick_member { .. } | - pallet_alliance::Call::close { .. } | - pallet_alliance::Call::abdicate_fellow_status { .. }, - ) | RuntimeCall::AllianceMotion( - pallet_collective::Call::vote { .. } | - pallet_collective::Call::disapprove_proposal { .. } | - pallet_collective::Call::close { .. }, - ) | RuntimeCall::FellowshipCollective( - pallet_ranked_collective::Call::add_member { .. } | - pallet_ranked_collective::Call::promote_member { .. } | - pallet_ranked_collective::Call::demote_member { .. } | - pallet_ranked_collective::Call::remove_member { .. }, - ) | RuntimeCall::FellowshipCore( - pallet_core_fellowship::Call::bump { .. } | - pallet_core_fellowship::Call::set_params { .. } | - pallet_core_fellowship::Call::set_active { .. } | - pallet_core_fellowship::Call::approve { .. } | - pallet_core_fellowship::Call::induct { .. } | - pallet_core_fellowship::Call::promote { .. } | - pallet_core_fellowship::Call::offboard { .. } | - pallet_core_fellowship::Call::submit_evidence { .. } | - pallet_core_fellowship::Call::import { .. }, - ) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -287,8 +210,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 7eab53b8a7cf..3e71730e015f 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -48,7 +48,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RocRelayLocation: Location = Location::parent(); @@ -139,49 +139,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. 
} | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. } | - // Should not be in Polkadot/Kusama. Here in order to speed up testing. - frame_system::Call::set_storage { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::Sudo(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::Broker(..) - ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -258,8 +215,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index e1452ec63f20..fc7ecf1e61c3 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -48,7 +48,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const TokenRelayLocation: Location = Location::parent(); @@ -146,48 +146,6 @@ impl Contains for FellowsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. } | - // Should not be in Polkadot/Kusama. Here in order to speed up testing. - frame_system::Call::set_storage { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection(..) | - RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::Broker(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -265,8 +223,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index 1a42adeafd1d..e4e4fa1b2c44 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -45,7 +45,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); @@ -148,55 +148,6 @@ impl Contains for ParentOrParentsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Identity(..) | - RuntimeCall::IdentityMigrator(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -270,8 +221,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 114923270645..590f23f6853f 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -45,7 +45,7 @@ use xcm_builder::{ UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; +use xcm_executor::XcmExecutor; parameter_types! { pub const RootLocation: Location = Location::here(); @@ -155,55 +155,6 @@ impl Contains for FellowsPlurality { } } -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm( - pallet_xcm::Call::force_xcm_version { .. } | - pallet_xcm::Call::force_default_xcm_version { .. } - ) | RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::authorize_upgrade { .. } | - frame_system::Call::authorize_upgrade_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Identity(..) | - RuntimeCall::IdentityMigrator(..) 
- ) - } -} - pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -278,8 +229,8 @@ impl xcm_executor::Config for XcmConfig { >; type MessageExporter = (); type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; type Aliasers = Nothing; type TransactionalProcessor = FrameTransactionalProcessor; type HrmpNewChannelOpenRequestHandler = (); diff --git a/prdoc/pr_4199.prdoc b/prdoc/pr_4199.prdoc new file mode 100644 index 000000000000..39f08a0532b8 --- /dev/null +++ b/prdoc/pr_4199.prdoc @@ -0,0 +1,29 @@ +title: "Remove XCM SafeCallFilter for chains using Weights::v3" + +doc: + - audience: Runtime User + description: | + `SafeCallFilter` was removed from Rococo and Westend relay and system chains as they + all now use Weights::v3 which already accounts for call PoV size. + This effectively removes artificial limitations on what users can `XCM::Transact` on + these chains (blockspace limitations are still upheld). + +crates: + - name: asset-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: collectives-westend-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: people-westend-runtime + bump: minor From 157294b0d39f1b3dd7307a70c77de5267134ede2 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:17:06 +0300 Subject: [PATCH 36/74] Add metric for time spent waiting in the execution queue (#4250) Add a metric to be able to understand the time jobs are waiting in the execution queue waiting for an available worker. https://github.com/paritytech/polkadot-sdk/issues/4126 Signed-off-by: Alexandru Gheorghe --- polkadot/node/core/pvf/src/execute/queue.rs | 3 ++ polkadot/node/core/pvf/src/metrics.rs | 32 +++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index af147a2ba227..bb00a5a652d6 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -562,6 +562,9 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { thus claim_idle cannot return None; qed.", ); + queue + .metrics + .observe_execution_queued_time(job.waiting_since.elapsed().as_millis() as u32); let execution_timer = queue.metrics.time_execution(); queue.mux.push( async move { diff --git a/polkadot/node/core/pvf/src/metrics.rs b/polkadot/node/core/pvf/src/metrics.rs index 7fd876cf1740..bc8d300037fe 100644 --- a/polkadot/node/core/pvf/src/metrics.rs +++ b/polkadot/node/core/pvf/src/metrics.rs @@ -74,6 +74,12 @@ impl Metrics { self.0.as_ref().map(|metrics| metrics.execution_time.start_timer()) } + pub(crate) fn observe_execution_queued_time(&self, queued_for_millis: u32) { + self.0.as_ref().map(|metrics| { + metrics.execution_queued_time.observe(queued_for_millis as f64 / 1000 as f64) + }); + } + /// Observe memory stats for preparation. 
#[allow(unused_variables)] pub(crate) fn observe_preparation_memory_metrics(&self, memory_stats: MemoryStats) { @@ -112,6 +118,7 @@ struct MetricsInner { execute_finished: prometheus::Counter, preparation_time: prometheus::Histogram, execution_time: prometheus::Histogram, + execution_queued_time: prometheus::Histogram, #[cfg(target_os = "linux")] preparation_max_rss: prometheus::Histogram, // Max. allocated memory, tracked by Jemallocator, polling-based @@ -240,6 +247,31 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + execution_queued_time: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_pvf_execution_queued_time", + "Time spent in queue waiting for PVFs execution job to be assigned", + ).buckets(vec![ + 0.01, + 0.025, + 0.05, + 0.1, + 0.25, + 0.5, + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 12.0, + 24.0, + 48.0, + ]), + )?, + registry, + )?, #[cfg(target_os = "linux")] preparation_max_rss: prometheus::register( prometheus::Histogram::with_opts( From 7f1646eb3837bfa53fb1cb8eabd7a0e1026469b8 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 23 Apr 2024 10:38:20 +0200 Subject: [PATCH 37/74] Add `validate_xcm_nesting` to the `ParentAsUmp` and `ChildParachainRouter` (#4236) This PR: - moves `validate_xcm_nesting` from `XcmpQueue` into the `VersionedXcm` - adds `validate_xcm_nesting` to the `ParentAsUmp` - adds `validate_xcm_nesting` to the `ChildParachainRouter` Based on discussion [here](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1571344270) and/or [here](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1572076666) and/or [here]() ## Question/TODO - [x] To the [comment](https://github.com/paritytech/polkadot-sdk/pull/4186#discussion_r1572072295) - Why was `validate_xcm_nesting` added just to the `XcmpQueue` router and nowhere else? What kind of problem `MAX_XCM_DECODE_DEPTH` is solving? 
(see [comment](https://github.com/paritytech/polkadot-sdk/pull/4236#discussion_r1574605191)) --- cumulus/pallets/xcmp-queue/src/lib.rs | 17 +--- .../assets/asset-hub-rococo/src/lib.rs | 16 ++-- .../assets/asset-hub-westend/src/lib.rs | 16 ++-- cumulus/primitives/utility/src/lib.rs | 28 ++++++ .../runtime/common/src/integration_tests.rs | 5 +- polkadot/runtime/common/src/xcm_sender.rs | 44 ++++++++- .../src/fungible/benchmarking.rs | 26 ++++-- .../src/fungible/mock.rs | 7 +- .../src/generic/benchmarking.rs | 36 ++++--- .../pallet-xcm-benchmarks/src/generic/mock.rs | 5 +- polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs | 13 ++- polkadot/xcm/src/lib.rs | 93 ++++++++++++++++++- 12 files changed, 237 insertions(+), 69 deletions(-) diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index deced13a9e81..7de2fd809421 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -916,7 +916,8 @@ impl SendXcm for Pallet { let price = T::PriceForSiblingDelivery::price_for_delivery(id, &xcm); let versioned_xcm = T::VersionWrapper::wrap_version(&d, xcm) .map_err(|()| SendError::DestinationUnsupported)?; - validate_xcm_nesting(&versioned_xcm) + versioned_xcm + .validate_xcm_nesting() .map_err(|()| SendError::ExceedsMaxMessageSize)?; Ok(((id, versioned_xcm), price)) @@ -932,10 +933,6 @@ impl SendXcm for Pallet { fn deliver((id, xcm): (ParaId, VersionedXcm<()>)) -> Result { let hash = xcm.using_encoded(sp_io::hashing::blake2_256); - defensive_assert!( - validate_xcm_nesting(&xcm).is_ok(), - "Tickets are valid prior to delivery by trait XCM; qed" - ); match Self::send_fragment(id, XcmpMessageFormat::ConcatenatedVersionedXcm, xcm) { Ok(_) => { @@ -950,16 +947,6 @@ impl SendXcm for Pallet { } } -/// Checks that the XCM is decodable with `MAX_XCM_DECODE_DEPTH`. -/// -/// Note that this uses the limit of the sender - not the receiver. It it best effort. -pub(crate) fn validate_xcm_nesting(xcm: &VersionedXcm<()>) -> Result<(), ()> { - xcm.using_encoded(|mut enc| { - VersionedXcm::<()>::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut enc).map(|_| ()) - }) - .map_err(|_| ()) -} - impl FeeTracker for Pallet { type Id = ParaId; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 5cb29343a1cf..201647ac2ebf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1519,27 +1519,23 @@ impl_runtime_apis! { fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets { // A mix of fungible, non-fungible, and concrete assets. 
let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count;
-		let holding_fungibles = holding_non_fungibles.saturating_sub(1);
+		let holding_fungibles = holding_non_fungibles.saturating_sub(2); // -2 for two `iter::once` below
 		let fungibles_amount: u128 = 100;
-		let mut assets = (0..holding_fungibles)
+		(0..holding_fungibles)
 			.map(|i| {
 				Asset {
 					id: GeneralIndex(i as u128).into(),
-					fun: Fungible(fungibles_amount * i as u128),
+					fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount
 				}
 			})
 			.chain(core::iter::once(Asset { id: Here.into(), fun: Fungible(u128::MAX) }))
+			.chain(core::iter::once(Asset { id: AssetId(TokenLocation::get()), fun: Fungible(1_000_000 * UNITS) }))
 			.chain((0..holding_non_fungibles).map(|i| Asset {
 				id: GeneralIndex(i as u128).into(),
 				fun: NonFungible(asset_instance_from(i)),
 			}))
-			.collect::<Vec<_>>();
-
-		assets.push(Asset {
-			id: AssetId(TokenLocation::get()),
-			fun: Fungible(1_000_000 * UNITS),
-		});
-		assets.into()
+			.collect::<Vec<_>>()
+			.into()
 		}
 	}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 366fb91723ae..78c83cf6922a 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -1610,27 +1610,23 @@ impl_runtime_apis! {
 			fn worst_case_holding(depositable_count: u32) -> xcm::v4::Assets {
 				// A mix of fungible, non-fungible, and concrete assets.
 				let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count;
-				let holding_fungibles = holding_non_fungibles - 1;
+				let holding_fungibles = holding_non_fungibles - 2; // -2 for two `iter::once` below
 				let fungibles_amount: u128 = 100;
-				let mut assets = (0..holding_fungibles)
+				(0..holding_fungibles)
 					.map(|i| {
 						Asset {
 							id: AssetId(GeneralIndex(i as u128).into()),
-							fun: Fungible(fungibles_amount * i as u128),
+							fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount
 						}
 					})
 					.chain(core::iter::once(Asset { id: AssetId(Here.into()), fun: Fungible(u128::MAX) }))
+					.chain(core::iter::once(Asset { id: AssetId(WestendLocation::get()), fun: Fungible(1_000_000 * UNITS) }))
 					.chain((0..holding_non_fungibles).map(|i| Asset {
 						id: AssetId(GeneralIndex(i as u128).into()),
 						fun: NonFungible(asset_instance_from(i)),
 					}))
-					.collect::<Vec<_>>();
-
-				assets.push(Asset {
-					id: AssetId(WestendLocation::get()),
-					fun: Fungible(1_000_000 * UNITS),
-				});
-				assets.into()
+					.collect::<Vec<_>>()
+					.into()
 			}
 		}
diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs
index d5d411356dc3..54f40bd01097 100644
--- a/cumulus/primitives/utility/src/lib.rs
+++ b/cumulus/primitives/utility/src/lib.rs
@@ -69,6 +69,9 @@ where
 		let price = P::price_for_delivery((), &xcm);
 		let versioned_xcm =
 			W::wrap_version(&d, xcm).map_err(|()| SendError::DestinationUnsupported)?;
+		versioned_xcm
+			.validate_xcm_nesting()
+			.map_err(|()| SendError::ExceedsMaxMessageSize)?;
 		let data = versioned_xcm.encode();

 		Ok((data, price))
@@ -526,6 +529,8 @@ impl<
 mod test_xcm_router {
 	use super::*;
 	use cumulus_primitives_core::UpwardMessage;
+	use frame_support::assert_ok;
+	use xcm::MAX_XCM_DECODE_DEPTH;

 	/// Validates [`validate`] for required Some(destination) and Some(message)
 	struct OkFixedXcmHashWithAssertingRequiredInputsSender;
@@ -621,6 +626,29 @@ mod test_xcm_router {
 			)>(dest.into(), message)
 		);
 	}
+
+	#[test]
+	fn parent_as_ump_validate_nested_xcm_works() {
+		let dest = Parent;
+
+		type Router = ParentAsUmp<(), (), ()>;
+
+		//
Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + // Check that the good message is validated: + assert_ok!(::validate(&mut Some(dest.into()), &mut Some(good.clone()))); + + // Nesting the message one more time should reject it: + let bad = Xcm(vec![SetAppendix(good)]); + assert_eq!( + Err(SendError::ExceedsMaxMessageSize), + ::validate(&mut Some(dest.into()), &mut Some(bad)) + ); + } } #[cfg(test)] mod test_trader { diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 91b64ef7259c..3e9ac1fc1b15 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -39,7 +39,7 @@ use primitives::{ MAX_CODE_SIZE, }; use runtime_parachains::{ - configuration, origin, paras, shared, Origin as ParaOrigin, ParaLifecycle, + configuration, dmp, origin, paras, shared, Origin as ParaOrigin, ParaLifecycle, }; use sp_core::H256; use sp_io::TestExternalities; @@ -84,6 +84,7 @@ frame_support::construct_runtime!( Paras: paras, ParasShared: shared, ParachainsOrigin: origin, + Dmp: dmp, // Para Onboarding Pallets Registrar: paras_registrar, @@ -201,6 +202,8 @@ impl shared::Config for Test { type DisabledValidators = (); } +impl dmp::Config for Test {} + impl origin::Config for Test {} parameter_types! { diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 0cbc2e603c8e..a712d4381f75 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -119,7 +119,9 @@ where let config = configuration::ActiveConfig::::get(); let para = id.into(); let price = P::price_for_delivery(para, &xcm); - let blob = W::wrap_version(&d, xcm).map_err(|()| DestinationUnsupported)?.encode(); + let versioned_xcm = W::wrap_version(&d, xcm).map_err(|()| DestinationUnsupported)?; + versioned_xcm.validate_xcm_nesting().map_err(|()| ExceedsMaxMessageSize)?; + let blob = versioned_xcm.encode(); dmp::Pallet::::can_queue_downward_message(&config, ¶, &blob) .map_err(Into::::into)?; @@ -236,9 +238,11 @@ impl EnsureForParachain for () { #[cfg(test)] mod tests { use super::*; - use frame_support::parameter_types; + use crate::integration_tests::new_test_ext; + use frame_support::{assert_ok, parameter_types}; use runtime_parachains::FeeTracker; use sp_runtime::FixedU128; + use xcm::MAX_XCM_DECODE_DEPTH; parameter_types! 
{ pub const BaseDeliveryFee: u128 = 300_000_000; @@ -297,4 +301,40 @@ mod tests { (FeeAssetId::get(), result).into() ); } + + #[test] + fn child_parachain_router_validate_nested_xcm_works() { + let dest = Parachain(5555); + + type Router = ChildParachainRouter< + crate::integration_tests::Test, + (), + NoPriceForMessageDelivery, + >; + + // Message that is not too deeply nested: + let mut good = Xcm(vec![ClearOrigin]); + for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { + good = Xcm(vec![SetAppendix(good)]); + } + + new_test_ext().execute_with(|| { + configuration::ActiveConfig::::mutate(|c| { + c.max_downward_message_size = u32::MAX; + }); + + // Check that the good message is validated: + assert_ok!(::validate( + &mut Some(dest.into()), + &mut Some(good.clone()) + )); + + // Nesting the message one more time should reject it: + let bad = Xcm(vec![SetAppendix(good)]); + assert_eq!( + Err(ExceedsMaxMessageSize), + ::validate(&mut Some(dest.into()), &mut Some(bad)) + ); + }); + } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs index 4b77199069d3..d99da9184b5d 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/benchmarking.rs @@ -146,8 +146,6 @@ benchmarks_instance_pallet! { initiate_reserve_withdraw { let (sender_account, sender_location) = account_and_location::(1); - let holding = T::worst_case_holding(1); - let assets_filter = AssetFilter::Definite(holding.clone().into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()); let reserve = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( @@ -157,15 +155,29 @@ benchmarks_instance_pallet! { ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let holding = if let Some(expected_assets_in_holding) = expected_assets_in_holding { + let mut holding = T::worst_case_holding(1 + expected_assets_in_holding.len() as u32); + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + holding + } else { + T::worst_case_holding(1) + }; + let mut executor = new_executor::(sender_location); - executor.set_holding(holding.into()); + executor.set_holding(holding.clone().into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } - let instruction = Instruction::InitiateReserveWithdraw { assets: assets_filter, reserve, xcm: Xcm(vec![]) }; + + let instruction = Instruction::InitiateReserveWithdraw { + // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), + reserve, + xcm: Xcm(vec![]) + }; let xcm = Xcm(vec![instruction]); }: { executor.bench_process(xcm)?; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index d11f64e74944..bf7d4e589de3 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -16,7 +16,7 @@ //! A mock runtime for XCM benchmarking. 
-use crate::{fungible as xcm_balances_benchmark, mock::*}; +use crate::{fungible as xcm_balances_benchmark, generate_holding_assets, mock::*}; use frame_benchmarking::BenchmarkError; use frame_support::{ derive_impl, parameter_types, @@ -130,9 +130,8 @@ impl crate::Config for Test { Ok(valid_destination) } fn worst_case_holding(depositable_count: u32) -> Assets { - crate::mock_worst_case_holding( - depositable_count, - ::MaxAssetsIntoHolding::get(), + generate_holding_assets( + ::MaxAssetsIntoHolding::get() - depositable_count, ) } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 8c6ed4b5d0e0..760b21f93566 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -19,9 +19,9 @@ use crate::{account_and_location, new_executor, EnsureDelivery, XcmCallOf}; use codec::Encode; use frame_benchmarking::{benchmarks, BenchmarkError}; use frame_support::{dispatch::GetDispatchInfo, traits::fungible::Inspect}; -use sp_std::vec; +use sp_std::{prelude::*, vec}; use xcm::{ - latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight}, + latest::{prelude::*, MaxDispatchErrorLen, MaybeErrorCode, Weight, MAX_ITEMS_IN_ASSETS}, DoubleEncoded, }; use xcm_executor::{ @@ -32,7 +32,6 @@ use xcm_executor::{ benchmarks! { report_holding { let (sender_account, sender_location) = account_and_location::(1); - let holding = T::worst_case_holding(0); let destination = T::valid_destination().map_err(|_| BenchmarkError::Skip)?; let (expected_fees_mode, expected_assets_in_holding) = T::DeliveryHelper::ensure_successful_delivery( @@ -42,14 +41,22 @@ benchmarks! { ); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let holding = if let Some(expected_assets_in_holding) = expected_assets_in_holding { + let mut holding = T::worst_case_holding(expected_assets_in_holding.len() as u32); + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + holding + } else { + T::worst_case_holding(0) + }; + let mut executor = new_executor::(sender_location); executor.set_holding(holding.clone().into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } let instruction = Instruction::>::ReportHolding { response_info: QueryResponseInfo { @@ -57,8 +64,8 @@ benchmarks! { query_id: Default::default(), max_weight: Weight::MAX, }, - // Worst case is looking through all holdings for every asset explicitly. - assets: Definite(holding), + // Worst case is looking through all holdings for every asset explicitly - respecting the limit `MAX_ITEMS_IN_ASSETS`. + assets: Definite(holding.into_inner().into_iter().take(MAX_ITEMS_IN_ASSETS).collect::>().into()), }; let xcm = Xcm(vec![instruction]); @@ -612,14 +619,19 @@ benchmarks! 
{ let sender_account = T::AccountIdConverter::convert_location(&owner).unwrap(); let sender_account_balance_before = T::TransactAsset::balance(&sender_account); + // generate holding and add possible required fees + let mut holding: Assets = asset.clone().into(); + if let Some(expected_assets_in_holding) = expected_assets_in_holding { + for a in expected_assets_in_holding.into_inner() { + holding.push(a); + } + }; + let mut executor = new_executor::(owner); - executor.set_holding(asset.clone().into()); + executor.set_holding(holding.into()); if let Some(expected_fees_mode) = expected_fees_mode { executor.set_fees_mode(expected_fees_mode); } - if let Some(expected_assets_in_holding) = expected_assets_in_holding { - executor.set_holding(expected_assets_in_holding.into()); - } let instruction = Instruction::LockAsset { asset, unlocker }; let xcm = Xcm(vec![instruction]); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index f41df017b9db..da0f28ccf28d 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -132,9 +132,8 @@ impl crate::Config for Test { Ok(valid_destination) } fn worst_case_holding(depositable_count: u32) -> Assets { - crate::mock_worst_case_holding( - depositable_count, - ::MaxAssetsIntoHolding::get(), + generate_holding_assets( + ::MaxAssetsIntoHolding::get() - depositable_count, ) } } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 63ed0ac0ca73..a43f27bf47e7 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -50,6 +50,8 @@ pub trait Config: frame_system::Config { fn valid_destination() -> Result; /// Worst case scenario for a holding account in this runtime. + /// - `depositable_count` specifies the count of assets we plan to add to the holding on top of + /// those generated by the `worst_case_holding` implementation. fn worst_case_holding(depositable_count: u32) -> Assets; } @@ -64,19 +66,22 @@ pub type AssetTransactorOf = <::XcmConfig as XcmConfig>::AssetTr /// The call type of executor's config. Should eventually resolve to the same overarching call type. 
pub type XcmCallOf = <::XcmConfig as XcmConfig>::RuntimeCall; -pub fn mock_worst_case_holding(depositable_count: u32, max_assets: u32) -> Assets { +pub fn generate_holding_assets(max_assets: u32) -> Assets { let fungibles_amount: u128 = 100; - let holding_fungibles = max_assets / 2 - depositable_count; - let holding_non_fungibles = holding_fungibles; + let holding_fungibles = max_assets / 2; + let holding_non_fungibles = max_assets - holding_fungibles - 1; // -1 because of adding `Here` asset + // add count of `holding_fungibles` (0..holding_fungibles) .map(|i| { Asset { id: AssetId(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), + fun: Fungible(fungibles_amount * (i + 1) as u128), // non-zero amount } .into() }) + // add one more `Here` asset .chain(core::iter::once(Asset { id: AssetId(Here.into()), fun: Fungible(u128::MAX) })) + // add count of `holding_non_fungibles` .chain((0..holding_non_fungibles).map(|i| Asset { id: AssetId(GeneralIndex(i as u128).into()), fun: NonFungible(asset_instance_from(i)), diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index e836486e86e3..198020ea1261 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use derivative::Derivative; -use parity_scale_codec::{Decode, Encode, Error as CodecError, Input, MaxEncodedLen}; +use parity_scale_codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedLen}; use scale_info::TypeInfo; pub mod v2; @@ -456,6 +456,23 @@ impl IdentifyVersion for VersionedXcm { } } +impl VersionedXcm { + /// Checks that the XCM is decodable with `MAX_XCM_DECODE_DEPTH`. Consequently, it also checks + /// all decode implementations and limits, such as MAX_ITEMS_IN_ASSETS or + /// MAX_INSTRUCTIONS_TO_DECODE. + /// + /// Note that this uses the limit of the sender - not the receiver. It is a best effort. 
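+	///
+	/// As an illustrative sketch (mirroring the tests below, not normative doc text):
+	/// a flat message such as `VersionedXcm::<()>::from(Xcm(vec![ClearOrigin]))` passes
+	/// this check, while a message wrapped in `SetAppendix` more than
+	/// `MAX_XCM_DECODE_DEPTH` times fails it.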
+ pub fn validate_xcm_nesting(&self) -> Result<(), ()> { + self.using_encoded(|mut enc| { + Self::decode_all_with_depth_limit(MAX_XCM_DECODE_DEPTH, &mut enc).map(|_| ()) + }) + .map_err(|e| { + log::error!(target: "xcm::validate_xcm_nesting", "Decode error: {e:?} for xcm: {self:?}!"); + () + }) + } +} + impl From> for VersionedXcm { fn from(x: v2::Xcm) -> Self { VersionedXcm::V2(x) @@ -704,3 +721,77 @@ fn size_limits() { } assert!(!test_failed); } + +#[test] +fn validate_xcm_nesting_works() { + use crate::latest::{ + prelude::{GeneralIndex, ReserveAssetDeposited, SetAppendix}, + Assets, Xcm, MAX_INSTRUCTIONS_TO_DECODE, MAX_ITEMS_IN_ASSETS, + }; + + // closure generates assets of `count` + let assets = |count| { + let mut assets = Assets::new(); + for i in 0..count { + assets.push((GeneralIndex(i as u128), 100).into()); + } + assets + }; + + // closer generates `Xcm` with nested instructions of `depth` + let with_instr = |depth| { + let mut xcm = Xcm::<()>(vec![]); + for _ in 0..depth - 1 { + xcm = Xcm::<()>(vec![SetAppendix(xcm)]); + } + xcm + }; + + // `MAX_INSTRUCTIONS_TO_DECODE` check + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + (MAX_INSTRUCTIONS_TO_DECODE - 1) as usize + ])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + MAX_INSTRUCTIONS_TO_DECODE as usize + ])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ + ReserveAssetDeposited(assets(1)); + (MAX_INSTRUCTIONS_TO_DECODE + 1) as usize + ])) + .validate_xcm_nesting() + .is_err()); + + // `MAX_XCM_DECODE_DEPTH` check + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH - 1)) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH)) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(with_instr(MAX_XCM_DECODE_DEPTH + 1)) + .validate_xcm_nesting() + .is_err()); + + // `MAX_ITEMS_IN_ASSETS` check + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS + ))])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS - 1 + ))])) + .validate_xcm_nesting() + .is_ok()); + assert!(VersionedXcm::<()>::from(Xcm(vec![ReserveAssetDeposited(assets( + MAX_ITEMS_IN_ASSETS + 1 + ))])) + .validate_xcm_nesting() + .is_err()); +} From ac4f421f0b99b73bbf80710206e9ac1463e8cb0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Apr 2024 11:51:11 +0200 Subject: [PATCH 38/74] parachains_coretime: Expose `MaxXCMTransactWeight` (#4189) This should be configured on the runtime level and not somewhere inside the pallet. 
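For illustration, wiring the exposed type up in a runtime looks roughly like this
(a minimal hedged sketch mirroring the Rococo/Westend changes below; the concrete
weight value is runtime-specific and has to cover the heaviest of `set_leases`,
`reserve` and `notify_core_count` on the coretime chain):

```rust
parameter_types! {
	// Assumed value, taken from the Rococo/Westend diffs in this patch; each
	// runtime must pick a weight covering its heaviest coretime-chain call.
	pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000);
}

impl coretime::Config for Runtime {
	// ...the remaining associated types stay unchanged...
	type MaxXcmTransactWeight = MaxXcmTransactWeight;
}
```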
--------- Co-authored-by: Adrian Catangiu Co-authored-by: Branislav Kontur --- .../runtime/parachains/src/coretime/migration.rs | 8 ++++---- polkadot/runtime/parachains/src/coretime/mod.rs | 15 +++++++++------ polkadot/runtime/parachains/src/mock.rs | 2 ++ polkadot/runtime/rococo/src/lib.rs | 2 ++ polkadot/runtime/westend/src/lib.rs | 2 ++ prdoc/pr_4189.prdoc | 16 ++++++++++++++++ 6 files changed, 35 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_4189.prdoc diff --git a/polkadot/runtime/parachains/src/coretime/migration.rs b/polkadot/runtime/parachains/src/coretime/migration.rs index 72eda1ea3f3c..4f52fc99ec30 100644 --- a/polkadot/runtime/parachains/src/coretime/migration.rs +++ b/polkadot/runtime/parachains/src/coretime/migration.rs @@ -222,7 +222,7 @@ mod v_coretime { mask: CoreMask::complete(), assignment: CoreAssignment::Task(p.into()), }]); - mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + mk_coretime_call::(crate::coretime::CoretimeCalls::Reserve(schedule)) }); let leases = lease_holding.into_iter().filter_map(|p| { @@ -243,14 +243,14 @@ mod v_coretime { let round_up = if valid_until % TIME_SLICE_PERIOD > 0 { 1 } else { 0 }; let time_slice = valid_until / TIME_SLICE_PERIOD + TIME_SLICE_PERIOD * round_up; log::trace!(target: "coretime-migration", "Sending of lease holding para {:?}, valid_until: {:?}, time_slice: {:?}", p, valid_until, time_slice); - Some(mk_coretime_call(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) + Some(mk_coretime_call::(crate::coretime::CoretimeCalls::SetLease(p.into(), time_slice))) }); let core_count: u16 = configuration::ActiveConfig::::get() .scheduler_params .num_cores .saturated_into(); - let set_core_count = iter::once(mk_coretime_call( + let set_core_count = iter::once(mk_coretime_call::( crate::coretime::CoretimeCalls::NotifyCoreCount(core_count), )); @@ -261,7 +261,7 @@ mod v_coretime { }]); // Reserved cores will come before lease cores, so cores will change their assignments // when coretime chain sends us their assign_core calls -> Good test. - mk_coretime_call(crate::coretime::CoretimeCalls::Reserve(schedule)) + mk_coretime_call::(crate::coretime::CoretimeCalls::Reserve(schedule)) }); let message_content = iter::once(Instruction::UnpaidExecution { diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 9095cd90ae0c..a30f7336f692 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -110,6 +110,11 @@ pub mod pallet { /// Something that provides the weight of this pallet. type WeightInfo: WeightInfo; type SendXcm: SendXcm; + + /// Maximum weight for any XCM transact call that should be executed on the coretime chain. + /// + /// Basically should be `max_weight(set_leases, reserve, notify_core_count)`. 
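+	///
+	/// For orientation (hedged, values taken from the runtimes in this patch rather
+	/// than anything normative): Rococo and Westend set this to
+	/// `Weight::from_parts(200_000_000, 20_000)` and the test mock uses
+	/// `Weight::from_parts(10_000_000, 10_000)`.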
+ type MaxXcmTransactWeight: Get; } #[pallet::event] @@ -225,7 +230,7 @@ impl Pallet { weight_limit: WeightLimit::Unlimited, check_origin: None, }, - mk_coretime_call(crate::coretime::CoretimeCalls::NotifyCoreCount(core_count)), + mk_coretime_call::(crate::coretime::CoretimeCalls::NotifyCoreCount(core_count)), ]); if let Err(err) = send_xcm::( Location::new(0, [Junction::Parachain(T::BrokerId::get())]), @@ -244,7 +249,7 @@ impl Pallet { weight_limit: WeightLimit::Unlimited, check_origin: None, }, - mk_coretime_call(crate::coretime::CoretimeCalls::SwapLeases(one, other)), + mk_coretime_call::(crate::coretime::CoretimeCalls::SwapLeases(one, other)), ]); if let Err(err) = send_xcm::( Location::new(0, [Junction::Parachain(T::BrokerId::get())]), @@ -261,12 +266,10 @@ impl OnNewSession> for Pallet { } } -fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { +fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruction<()> { Instruction::Transact { origin_kind: OriginKind::Superuser, - // Largest call is set_lease with 1526 byte: - // Longest call is reserve() with 31_000_000 - require_weight_at_most: Weight::from_parts(170_000_000, 20_000), + require_weight_at_most: T::MaxXcmTransactWeight::get(), call: BrokerRuntimePallets::Broker(call).encode().into(), } } diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index c91f5be127cd..97a75d47ff77 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -387,6 +387,7 @@ impl assigner_coretime::Config for Test {} parameter_types! { pub const BrokerId: u32 = 10u32; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(10_000_000, 10_000); } impl coretime::Config for Test { @@ -396,6 +397,7 @@ impl coretime::Config for Test { type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } pub struct DummyXcmSender; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index ba80fa6942c7..1cfe9adfe13d 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1057,6 +1057,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } impl coretime::Config for Runtime { @@ -1066,6 +1067,7 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 02933efff944..7924939c79bd 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1188,6 +1188,7 @@ impl parachains_scheduler::Config for Runtime { parameter_types! { pub const BrokerId: u32 = BROKER_ID; + pub MaxXcmTransactWeight: Weight = Weight::from_parts(200_000_000, 20_000); } impl coretime::Config for Runtime { @@ -1197,6 +1198,7 @@ impl coretime::Config for Runtime { type BrokerId = BrokerId; type WeightInfo = weights::runtime_parachains_coretime::WeightInfo; type SendXcm = crate::xcm_config::XcmRouter; + type MaxXcmTransactWeight = MaxXcmTransactWeight; } parameter_types! 
{

diff --git a/prdoc/pr_4189.prdoc b/prdoc/pr_4189.prdoc
new file mode 100644
index 000000000000..74ed7c67cbde
--- /dev/null
+++ b/prdoc/pr_4189.prdoc
@@ -0,0 +1,16 @@
+title: "polkadot_runtime_parachains::coretime: Expose `MaxXcmTransactWeight`"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Expose `MaxXcmTransactWeight` via the `Config` trait. This exposes the
+      possibility for runtime implementors to set the maximum weight required
+      for the calls on the coretime chain. Basically it needs to be set to
+      `max_weight(set_leases, reserve, notify_core_count)` where `set_leases`
+      etc. are the calls on the coretime chain. This ensures that these XCM
+      transact calls sent by the relay chain coretime pallet to the coretime
+      chain can be dispatched.
+
+crates:
+  - name: polkadot-runtime-parachains
+    bump: major

From f7c1e0cf10f8c5c179697e7dfff1191c3095e47a Mon Sep 17 00:00:00 2001
From: AlexWang
Date: Tue, 23 Apr 2024 22:49:39 +1200
Subject: [PATCH 39/74] Add OnFinality polkadot bootnode (#4247)

This is for adding the OnFinality Polkadot bootnode. Please correct me if
this is not the right place for adding a new bootnode.

---
 polkadot/node/service/chain-specs/polkadot.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json
index 5f8d88102d7e..035705437072 100644
--- a/polkadot/node/service/chain-specs/polkadot.json
+++ b/polkadot/node/service/chain-specs/polkadot.json
@@ -37,7 +37,8 @@
    "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff",
    "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ",
    "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ",
-    "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ"
+    "/dns/boot-polkadot.luckyfriday.io/tcp/443/wss/p2p/12D3KooWAdyiVAaeGdtBt6vn5zVetwA4z4qfm9Fi2QCSykN1wTBJ",
+    "/dns4/polkadot-0.boot.onfinality.io/tcp/24446/ws/p2p/12D3KooWT1PWaNdAwYrSr89dvStnoGdH3t4LNRbcVNN4JCtsotkR"
  ],
  "telemetryEndpoints": [
    [

From 5f2e66f5d0151e1ca9be1e0343147f24a735c476 Mon Sep 17 00:00:00 2001
From: sfuhfds
Date: Tue, 23 Apr 2024 18:53:50 +0800
Subject: [PATCH 40/74] chore: fix some typos (#4253)

---
 polkadot/runtime/common/src/crowdloan/mod.rs           | 2 +-
 polkadot/runtime/common/src/paras_registrar/mod.rs     | 2 +-
 polkadot/runtime/parachains/src/inclusion/mod.rs       | 2 +-
 polkadot/runtime/parachains/src/paras/mod.rs           | 2 +-
 polkadot/runtime/parachains/src/paras_inherent/mod.rs  | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs
index 12078871a195..477530467fa1 100644
--- a/polkadot/runtime/common/src/crowdloan/mod.rs
+++ b/polkadot/runtime/common/src/crowdloan/mod.rs
@@ -866,7 +866,7 @@ mod tests {
 	use sp_core::H256;
 	use std::{cell::RefCell, collections::BTreeMap, sync::Arc};
 	// The testing primitives are very useful for avoiding having to work with signatures
-	// or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried.
+	// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
use crate::{ crowdloan, mock::TestRegistrar, diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index cc949c9d3f62..a49ebab3e26a 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -412,7 +412,7 @@ pub mod pallet { /// validators have reported on the validity of the code, the code will either be enacted /// or the upgrade will be rejected. If the code will be enacted, the current code of the /// parachain will be overwritten directly. This means that any PoV will be checked by this - /// new code. The parachain itself will not be informed explictely that the validation code + /// new code. The parachain itself will not be informed explicitly that the validation code /// has changed. /// /// Can be called by Root, the parachain, or the parachain manager if the parachain is diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs index 76caf740ebca..31befefa3220 100644 --- a/polkadot/runtime/parachains/src/inclusion/mod.rs +++ b/polkadot/runtime/parachains/src/inclusion/mod.rs @@ -245,7 +245,7 @@ pub enum AggregateMessageOrigin { /// Identifies a UMP queue inside the `MessageQueue` pallet. /// /// It is written in verbose form since future variants like `Here` and `Bridged` are already -/// forseeable. +/// foreseeable. #[derive(Encode, Decode, Clone, MaxEncodedLen, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum UmpQueueId { /// The message originated from this parachain. diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index 6f67c4b8c03d..36a693bcc8e2 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -641,7 +641,7 @@ pub mod pallet { /// /// This is only used at genesis or by root. /// - /// TODO: Remove once coretime is the standard accross all chains. + /// TODO: Remove once coretime is the standard across all chains. type AssignCoretime: AssignCoretime; } diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs index 2c6c48acc6d4..ac4cf5dc8d41 100644 --- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs +++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs @@ -1099,7 +1099,7 @@ fn limit_and_sanitize_disputes< } // Helper function for filtering candidates which don't pass the given predicate. When/if the first -// candidate which failes the predicate is found, all the other candidates that follow are dropped. +// candidate which failed the predicate is found, all the other candidates that follow are dropped. fn retain_candidates< T: inclusion::Config + paras::Config + inclusion::Config, F: FnMut(ParaId, &mut C) -> bool, From 118cd6f922acc9c4b3938645cd34098275d41c93 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 23 Apr 2024 13:40:05 +0200 Subject: [PATCH 41/74] Ensure outbound XCMs are decodable with limits + add `EnsureDecodableXcm` router (for testing purposes) (#4186) This PR: - adds `EnsureDecodableXcm` (testing) router that attempts to *encode* and *decode* passed XCM `message` to ensure that the receiving side will be able to decode, at least with the same XCM version. 
- fixes `pallet_xcm` / `pallet_xcm_benchmarks` assets data generation Relates to investigation of https://substrate.stackexchange.com/questions/11288 and missing fix https://github.com/paritytech/polkadot-sdk/pull/2129 which did not get into the fellows 1.1.X release. ## Questions/TODOs - [x] fix XCM benchmarks, which produces undecodable data - new router catched at least two cases - `BoundedVec exceeds its limit` - `Fungible asset of zero amount is not allowed` - [x] do we need to add `sort` to the `prepend_with` as we did for reanchor [here](https://github.com/paritytech/polkadot-sdk/pull/2129)? @serban300 (**created separate/follow-up PR**: https://github.com/paritytech/polkadot-sdk/pull/4235) - [x] We added decoding check to `XcmpQueue` -> `validate_xcm_nesting`, why not to added to the `ParentAsUmp` or `ChildParachainRouter`? @franciscoaguirre (**created separate/follow-up PR**: https://github.com/paritytech/polkadot-sdk/pull/4236) - [ ] `SendController::send_blob` replace `VersionedXcm::<()>::decode(` with `VersionedXcm::<()>::decode_with_depth_limit(MAX_XCM_DECODE_DEPTH, data)` ? --------- Co-authored-by: Adrian Catangiu --- Cargo.lock | 8 ----- polkadot/runtime/test-runtime/Cargo.toml | 10 ------ .../runtime/test-runtime/constants/Cargo.toml | 6 ---- .../src/fungible/mock.rs | 6 ++-- .../pallet-xcm-benchmarks/src/generic/mock.rs | 5 +-- polkadot/xcm/pallet-xcm/src/mock.rs | 12 ++++--- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/src/routing.rs | 34 +++++++++++++++++++ polkadot/xcm/xcm-builder/src/tests/mock.rs | 7 ++-- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 12 ++++--- .../xcm-simulator/example/src/parachain.rs | 10 +++--- .../xcm-simulator/example/src/relay_chain.rs | 8 ++--- 12 files changed, 70 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fa5c42c1fa32..62479cce2a0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14348,7 +14348,6 @@ dependencies = [ name = "polkadot-test-runtime" version = "1.0.0" dependencies = [ - "bitvec", "frame-election-provider-support", "frame-executive", "frame-support", @@ -14373,16 +14372,12 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "parity-scale-codec", - "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", - "rustc-hex", "scale-info", "serde", - "serde_derive", "serde_json", - "smallvec", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -21273,11 +21268,8 @@ version = "1.0.0" dependencies = [ "frame-support", "polkadot-primitives", - "polkadot-runtime-common", "smallvec", - "sp-core", "sp-runtime", - "sp-weights", ] [[package]] diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 35fb684597e7..6552ed4ef8ae 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -11,14 +11,10 @@ license.workspace = true workspace = true [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } log = { workspace = true } -rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { workspace = true } -serde_derive = { optional = true, workspace = true } -smallvec = "1.8.0" authority-discovery-primitives = { package = "sp-authority-discovery", path = "../../../substrate/primitives/authority-discovery", default-features = 
false } babe-primitives = { package = "sp-consensus-babe", path = "../../../substrate/primitives/consensus/babe", default-features = false } @@ -63,7 +59,6 @@ pallet-vesting = { path = "../../../substrate/frame/vesting", default-features = runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -polkadot-parachain-primitives = { path = "../../parachain", default-features = false } polkadot-runtime-parachains = { path = "../parachains", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executor", default-features = false } @@ -92,7 +87,6 @@ std = [ "authority-discovery-primitives/std", "babe-primitives/std", "beefy-primitives/std", - "bitvec/std", "block-builder-api/std", "frame-election-provider-support/std", "frame-executive/std", @@ -118,14 +112,11 @@ std = [ "pallet-vesting/std", "pallet-xcm/std", "parity-scale-codec/std", - "polkadot-parachain-primitives/std", "polkadot-runtime-parachains/std", "primitives/std", "runtime-common/std", - "rustc-hex/std", "scale-info/std", "serde/std", - "serde_derive", "sp-api/std", "sp-core/std", "sp-genesis-builder/std", @@ -157,7 +148,6 @@ runtime-benchmarks = [ "pallet-timestamp/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-xcm/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", "primitives/runtime-benchmarks", "runtime-common/runtime-benchmarks", diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index 2b387bbd3072..5b8a4d7a051a 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -14,18 +14,12 @@ smallvec = "1.8.0" frame-support = { path = "../../../../substrate/frame/support", default-features = false } primitives = { package = "polkadot-primitives", path = "../../../primitives", default-features = false } -runtime-common = { package = "polkadot-runtime-common", path = "../../common", default-features = false } sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } -sp-weights = { path = "../../../../substrate/primitives/weights", default-features = false } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false } [features] default = ["std"] std = [ "frame-support/std", "primitives/std", - "runtime-common/std", - "sp-core/std", "sp-runtime/std", - "sp-weights/std", ] diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index bf7d4e589de3..7233b46d0cd6 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -23,7 +23,9 @@ use frame_support::{ traits::{Everything, Nothing}, }; use xcm::latest::prelude::*; -use xcm_builder::{AllowUnpaidExecutionFrom, FrameTransactionalProcessor, MintLocation}; +use xcm_builder::{ + AllowUnpaidExecutionFrom, EnsureDecodableXcm, FrameTransactionalProcessor, MintLocation, +}; type Block = frame_system::mocking::MockBlock; @@ -91,7 +93,7 @@ parameter_types! 
{ pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = DevNull; + type XcmSender = EnsureDecodableXcm; type AssetTransactor = AssetTransactor; type OriginConverter = (); type IsReserve = TrustedReserves; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index da0f28ccf28d..a9f4d37d7a55 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -28,7 +28,8 @@ use xcm_builder::{ AssetsInHolding, TestAssetExchanger, TestAssetLocker, TestAssetTrap, TestSubscriptionService, TestUniversalAliases, }, - AliasForeignAccountId32, AllowUnpaidExecutionFrom, FrameTransactionalProcessor, + AliasForeignAccountId32, AllowUnpaidExecutionFrom, EnsureDecodableXcm, + FrameTransactionalProcessor, }; use xcm_executor::traits::ConvertOrigin; @@ -81,7 +82,7 @@ type Aliasers = AliasForeignAccountId32; pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = DevNull; + type XcmSender = EnsureDecodableXcm; type AssetTransactor = NoAssetTransactor; type OriginConverter = AlwaysSignedByDefault; type IsReserve = AllAssetLocationsPass; diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index e3680c530e24..8e94803e8431 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -33,10 +33,11 @@ use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, DescribeAllTerminal, FixedRateOfFungible, FixedWeightBounds, - FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, HashedDescription, IsConcrete, - MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, + ChildSystemParachainAsSuperuser, DescribeAllTerminal, EnsureDecodableXcm, FixedRateOfFungible, + FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + HashedDescription, IsConcrete, MatchedConvertedConcreteId, NoChecking, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::{ traits::{Identity, JustTry}, @@ -488,7 +489,8 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); -pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); +pub type XcmRouter = + EnsureDecodableXcm<(TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm)>; pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index bd4a4c941c91..cdc663a0cc9b 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -119,7 +119,7 @@ mod process_xcm_message; pub use process_xcm_message::ProcessXcmMessage; mod routing; -pub use routing::{EnsureDelivery, WithTopicSource, WithUniqueTopic}; +pub use routing::{EnsureDecodableXcm, EnsureDelivery, WithTopicSource, WithUniqueTopic}; mod transactional; pub use transactional::FrameTransactionalProcessor; diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 529ef80c15ff..921b9ac5922e 100644 --- 
a/polkadot/xcm/xcm-builder/src/routing.rs
+++ b/polkadot/xcm/xcm-builder/src/routing.rs
@@ -139,3 +139,37 @@ impl EnsureDelivery for Tuple {
 		(None, None)
 	}
 }
+
+/// A wrapper router that attempts to *encode* and *decode* passed XCM `message` to ensure that the
+/// receiving side will be able to decode, at least with the same XCM version.
+///
+/// This is designed to be at the top-level of any routers which do the real delivery. While other
+/// routers can manipulate the `message`, we cannot access the final XCM due to the generic
+/// `Inner::Ticket`. Therefore, this router aims to validate at least the passed `message`.
+///
+/// NOTE: For use in mock runtimes which don't have the DMP/UMP/HRMP XCM validations.
+pub struct EnsureDecodableXcm<Inner>(sp_std::marker::PhantomData<Inner>);
+impl<Inner: SendXcm> SendXcm for EnsureDecodableXcm<Inner> {
+	type Ticket = Inner::Ticket;
+
+	fn validate(
+		destination: &mut Option<Location>,
+		message: &mut Option<Xcm<()>>,
+	) -> SendResult<Self::Ticket> {
+		if let Some(msg) = message {
+			let versioned_xcm = VersionedXcm::<()>::from(msg.clone());
+			if versioned_xcm.validate_xcm_nesting().is_err() {
+				log::error!(
+					target: "xcm::validate_xcm_nesting",
+					"EnsureDecodableXcm validate_xcm_nesting error for \nversioned_xcm: {versioned_xcm:?}\nbased on xcm: {msg:?}"
+				);
+				return Err(SendError::Transport("EnsureDecodableXcm validate_xcm_nesting error"))
+			}
+		}
+		Inner::validate(destination, message)
+	}
+
+	fn deliver(ticket: Self::Ticket) -> Result<XcmHash, SendError> {
+		Inner::deliver(ticket)
+	}
+}
diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs
index 3d03ab054248..7532b97d97b3 100644
--- a/polkadot/xcm/xcm-builder/src/tests/mock.rs
+++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs
@@ -19,6 +19,7 @@
 use crate::{
 	barriers::{AllowSubscriptionsFrom, RespectSuspension, TrailingSetTopicAsId},
 	test_utils::*,
+	EnsureDecodableXcm,
 };
 pub use crate::{
 	AliasForeignAccountId32, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
@@ -165,8 +166,8 @@ pub fn set_exporter_override(
 pub fn clear_exporter_override() {
 	EXPORTER_OVERRIDE.with(|x| x.replace(None));
 }
-pub struct TestMessageSender;
-impl SendXcm for TestMessageSender {
+pub struct TestMessageSenderImpl;
+impl SendXcm for TestMessageSenderImpl {
 	type Ticket = (Location, Xcm<()>, XcmHash);
 	fn validate(
 		dest: &mut Option<Location>,
@@ -183,6 +184,8 @@ impl SendXcm for TestMessageSender {
 		Ok(hash)
 	}
 }
+pub type TestMessageSender = EnsureDecodableXcm<TestMessageSenderImpl>;
+
 pub struct TestMessageExporter;
 impl ExportXcm for TestMessageExporter {
 	type Ticket = (NetworkId, u32, InteriorLocation, InteriorLocation, Xcm<()>, XcmHash);
diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs
index da38538b60c3..46ec23beebc1 100644
--- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs
+++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs
@@ -35,9 +35,9 @@ use staging_xcm_builder as xcm_builder;
 use xcm_builder::{
 	AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom,
 	ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser,
-	FixedRateOfFungible, FixedWeightBounds, FungibleAdapter, IsChildSystemParachain, IsConcrete,
-	MintLocation, RespectSuspension, SignedAccountId32AsNative, SignedToAccountId32,
-	SovereignSignedViaLocation, TakeWeightCredit,
+	EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, FungibleAdapter,
+	IsChildSystemParachain, IsConcrete, MintLocation, RespectSuspension, SignedAccountId32AsNative,
+	SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
 };
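The intended deployment pattern, as the `TestMessageSender` alias above shows, is to wrap a mock runtime's outermost router. A minimal sketch, where `InnerRouter` is a placeholder for whatever `SendXcm` implementation the tests already use:

```rust
use staging_xcm_builder as xcm_builder;
use xcm_builder::EnsureDecodableXcm;

// Every outbound message is now encode/decode round-trip checked before
// `InnerRouter` (a placeholder `SendXcm` impl) attempts actual delivery.
pub type XcmRouter = EnsureDecodableXcm<InnerRouter>;
```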
pub type AccountId = AccountId32; @@ -68,6 +68,8 @@ impl SendXcm for TestSendXcm { } } +pub type TestXcmRouter = EnsureDecodableXcm; + // copied from kusama constants pub const UNITS: Balance = 1_000_000_000_000; pub const CENTS: Balance = UNITS / 30_000; @@ -180,7 +182,7 @@ pub type TrustedTeleporters = (xcm_builder::Case,); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = TestSendXcm; + type XcmSender = TestXcmRouter; type AssetTransactor = LocalAssetTransactor; type OriginConverter = LocalOriginConverter; type IsReserve = (); @@ -215,7 +217,7 @@ impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = TestSendXcm; + type XcmRouter = TestXcmRouter; // Anyone can execute XCM messages locally... type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Nothing; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index c155ed5ab636..41e62596392e 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -40,10 +40,10 @@ use polkadot_parachain_primitives::primitives::{ use xcm::{latest::prelude::*, VersionedXcm}; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, ConvertedConcreteId, - EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NativeAsset, NoChecking, NonFungiblesAdapter, ParentIsPreset, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, + EnsureDecodableXcm, EnsureXcmOrigin, FixedRateOfFungible, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, NoChecking, + NonFungiblesAdapter, ParentIsPreset, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{ traits::{ConvertLocation, JustTry}, @@ -212,7 +212,7 @@ pub type LocalAssetTransactor = ( >, ); -pub type XcmRouter = super::ParachainXcmRouter; +pub type XcmRouter = EnsureDecodableXcm>; pub type Barrier = AllowUnpaidExecutionFrom; parameter_types! { diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 4e8f1f68ebd6..b41df3cfa2b0 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -36,9 +36,9 @@ use xcm::latest::prelude::*; use xcm_builder::{ Account32Hash, AccountId32Aliases, AllowUnpaidExecutionFrom, AsPrefixedGeneralIndex, ChildParachainAsNative, ChildParachainConvertsVia, ChildSystemParachainAsSuperuser, - ConvertedConcreteId, FixedRateOfFungible, FixedWeightBounds, FrameTransactionalProcessor, - FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, + ConvertedConcreteId, EnsureDecodableXcm, FixedRateOfFungible, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NoChecking, NonFungiblesAdapter, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, }; use xcm_executor::{traits::JustTry, Config, XcmExecutor}; @@ -168,7 +168,7 @@ parameter_types! 
{ pub const MaxAssetsIntoHolding: u32 = 64; } -pub type XcmRouter = super::RelayChainXcmRouter; +pub type XcmRouter = EnsureDecodableXcm; pub type Barrier = AllowUnpaidExecutionFrom; pub struct XcmConfig; From eda5e5c31f9bffafd6afd6d14fb95001a10dba9a Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Tue, 23 Apr 2024 14:53:20 +0200 Subject: [PATCH 42/74] Fix Stuck Collator Funds (#4229) Fixes https://github.com/paritytech/polkadot-sdk/issues/4206 In #1340 one of the storage types was changed from `Candidates` to `CandidateList`. Since the actual key includes the hash of this value, all of the candidates stored here are (a) "missing" and (b) unable to unreserve their candidacy bond. This migration kills the storage values and refunds the deposit held for each candidate. --------- Signed-off-by: georgepisaltu Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: georgepisaltu --- cumulus/pallets/collator-selection/Cargo.toml | 3 +- cumulus/pallets/collator-selection/src/lib.rs | 2 +- .../collator-selection/src/migration.rs | 233 +++++++++++++++++- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../collectives-westend/src/lib.rs | 2 +- .../contracts/contracts-rococo/src/lib.rs | 2 + .../coretime/coretime-rococo/src/lib.rs | 1 + .../coretime/coretime-westend/src/lib.rs | 1 + .../runtimes/people/people-rococo/src/lib.rs | 1 + .../runtimes/people/people-westend/src/lib.rs | 1 + prdoc/pr_4229.prdoc | 10 + 14 files changed, 256 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_4229.prdoc diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index c04d9e1403ec..25ca2fe057ba 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -27,6 +27,7 @@ sp-staking = { path = "../../../substrate/primitives/staking", default-features frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -38,7 +39,6 @@ sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-runtime = { path = "../../../substrate/primitives/runtime" } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura" } -pallet-balances = { path = "../../../substrate/frame/balances" } pallet-aura = { path = "../../../substrate/frame/aura" } [features] @@ -59,6 +59,7 @@ std = [ "frame-system/std", "log/std", "pallet-authorship/std", + "pallet-balances/std", "pallet-session/std", "rand/std", "scale-info/std", diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 17bbe2591d48..2fa384367528 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -121,7 +121,7 
@@ pub mod pallet { use sp_std::vec::Vec; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; diff --git a/cumulus/pallets/collator-selection/src/migration.rs b/cumulus/pallets/collator-selection/src/migration.rs index 5dc2fba4279a..425acdd8bfb5 100644 --- a/cumulus/pallets/collator-selection/src/migration.rs +++ b/cumulus/pallets/collator-selection/src/migration.rs @@ -17,9 +17,107 @@ //! A module that is responsible for migration of storage for Collator Selection. use super::*; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::traits::{OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}; use log; +/// Migrate to v2. Should have been part of . +pub mod v2 { + use super::*; + use frame_support::{ + pallet_prelude::*, + storage_alias, + traits::{Currency, ReservableCurrency}, + }; + use sp_runtime::traits::{Saturating, Zero}; + #[cfg(feature = "try-runtime")] + use sp_std::vec::Vec; + + /// [`UncheckedMigrationToV2`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 1. + pub type MigrationToV2 = frame_support::migrations::VersionedMigration< + 1, + 2, + UncheckedMigrationToV2, + Pallet, + ::DbWeight, + >; + + #[storage_alias] + pub type Candidates = StorageValue< + Pallet, + BoundedVec::AccountId, <::Currency as Currency<::AccountId>>::Balance>, ::MaxCandidates>, + ValueQuery, + >; + + /// Migrate to V2. + pub struct UncheckedMigrationToV2(sp_std::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for UncheckedMigrationToV2 { + fn on_runtime_upgrade() -> Weight { + let mut weight = Weight::zero(); + let mut count: u64 = 0; + // candidates who exist under the old `Candidates` key + let candidates = Candidates::::take(); + + // New candidates who have registered since the upgrade. Under normal circumstances, + // this should not exist because the migration should be applied when the upgrade + // happens. But in Polkadot/Kusama we messed this up, and people registered under + // `CandidateList` while their funds were locked in `Candidates`. + let new_candidate_list = CandidateList::::get(); + if new_candidate_list.len().is_zero() { + // The new list is empty, so this is essentially being applied correctly. We just + // put the candidates into the new storage item. + CandidateList::::put(&candidates); + // 1 write for the new list + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } else { + // Oops, the runtime upgraded without the migration. There are new candidates in + // `CandidateList`. So, let's just refund the old ones and assume they have already + // started participating in the new system. 
+ for candidate in candidates { + let err = T::Currency::unreserve(&candidate.who, candidate.deposit); + if err > Zero::zero() { + log::error!( + target: LOG_TARGET, + "{:?} balance was unable to be unreserved from {:?}", + err, &candidate.who, + ); + } + count.saturating_inc(); + } + weight.saturating_accrue( + <::WeightInfo as pallet_balances::WeightInfo>::force_unreserve().saturating_mul(count.into()), + ); + } + + log::info!( + target: LOG_TARGET, + "Unreserved locked bond of {} candidates, upgraded storage to version 2", + count, + ); + + weight.saturating_accrue(T::DbWeight::get().reads_writes(3, 2)); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::DispatchError> { + let number_of_candidates = Candidates::::get().to_vec().len(); + Ok((number_of_candidates as u32).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_number_of_candidates: Vec) -> Result<(), sp_runtime::DispatchError> { + let new_number_of_candidates = Candidates::::get().to_vec().len(); + assert_eq!( + new_number_of_candidates, 0 as usize, + "after migration, the candidates map should be empty" + ); + Ok(()) + } + } +} + /// Version 1 Migration /// This migration ensures that any existing `Invulnerables` storage lists are sorted. pub mod v1 { @@ -90,3 +188,136 @@ pub mod v1 { } } } + +#[cfg(all(feature = "try-runtime", test))] +mod tests { + use super::*; + use crate::{ + migration::v2::Candidates, + mock::{new_test_ext, Balances, Test}, + }; + use frame_support::{ + traits::{Currency, ReservableCurrency, StorageVersion}, + BoundedVec, + }; + use sp_runtime::traits::ConstU32; + + #[test] + fn migrate_to_v2_with_new_candidates() { + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(1); + storage_version.put::>(); + + let one = 1u64; + let two = 2u64; + let three = 3u64; + let deposit = 10u64; + + // Set balance to 100 + Balances::make_free_balance_be(&one, 100u64); + Balances::make_free_balance_be(&two, 100u64); + Balances::make_free_balance_be(&three, 100u64); + + // Reservations: 10 for the "old" candidacy and 10 for the "new" + Balances::reserve(&one, 10u64).unwrap(); // old + Balances::reserve(&two, 20u64).unwrap(); // old + new + Balances::reserve(&three, 10u64).unwrap(); // new + + // Candidate info + let candidate_one = CandidateInfo { who: one, deposit }; + let candidate_two = CandidateInfo { who: two, deposit }; + let candidate_three = CandidateInfo { who: three, deposit }; + + // Storage lists + let bounded_candidates = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_one.clone(), + candidate_two.clone(), + ]) + .expect("it works"); + let bounded_candidate_list = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_two.clone(), + candidate_three.clone(), + ]) + .expect("it works"); + + // Set storage + Candidates::::put(bounded_candidates); + CandidateList::::put(bounded_candidate_list.clone()); + + // Sanity check + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 80); + assert_eq!(Balances::free_balance(three), 90); + + // Run migration + v2::MigrationToV2::::on_runtime_upgrade(); + + let new_storage_version = StorageVersion::get::>(); + assert_eq!(new_storage_version, 2); + + // 10 should have been unreserved from the old candidacy + assert_eq!(Balances::free_balance(one), 100); + assert_eq!(Balances::free_balance(two), 90); + assert_eq!(Balances::free_balance(three), 90); + // The storage item should be gone + assert!(Candidates::::get().is_empty()); + // The new storage 
item should be preserved + assert_eq!(CandidateList::::get(), bounded_candidate_list); + }); + } + + #[test] + fn migrate_to_v2_without_new_candidates() { + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(1); + storage_version.put::>(); + + let one = 1u64; + let two = 2u64; + let deposit = 10u64; + + // Set balance to 100 + Balances::make_free_balance_be(&one, 100u64); + Balances::make_free_balance_be(&two, 100u64); + + // Reservations + Balances::reserve(&one, 10u64).unwrap(); // old + Balances::reserve(&two, 10u64).unwrap(); // old + + // Candidate info + let candidate_one = CandidateInfo { who: one, deposit }; + let candidate_two = CandidateInfo { who: two, deposit }; + + // Storage lists + let bounded_candidates = + BoundedVec::, ConstU32<20>>::try_from(vec![ + candidate_one.clone(), + candidate_two.clone(), + ]) + .expect("it works"); + + // Set storage + Candidates::::put(bounded_candidates.clone()); + + // Sanity check + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 90); + + // Run migration + v2::MigrationToV2::::on_runtime_upgrade(); + + let new_storage_version = StorageVersion::get::>(); + assert_eq!(new_storage_version, 2); + + // Nothing changes deposit-wise + assert_eq!(Balances::free_balance(one), 90); + assert_eq!(Balances::free_balance(two), 90); + // The storage item should be gone + assert!(Candidates::::get().is_empty()); + // The new storage item should have the info now + assert_eq!(CandidateList::::get(), bounded_candidates); + }); + } +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 201647ac2ebf..151734804632 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -973,10 +973,10 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. #[allow(deprecated)] pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 78c83cf6922a..64127c80b6d5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -963,7 +963,7 @@ pub type Migrations = ( // v9420 pallet_nfts::migration::v1::MigrateToV1, // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, // unreleased pallet_multisig::migrations::v1::MigrateToV1, // unreleased diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 1eac813b10ce..109b081f937d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -139,7 +139,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index b4ea2c79f64f..cf09a1acc548 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -118,7 +118,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, // unreleased diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index c599ba37f128..7274e9acdcd6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -722,7 +722,7 @@ pub type UncheckedExtrinsic = /// `OnRuntimeUpgrade`. Included migrations must be idempotent. type Migrations = ( // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, // unreleased cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, // permanent diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index efa26fcbc22d..988195d88d87 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -98,6 +98,8 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v1::MigrateToV1, + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_parachain_system::migration::Migration, cumulus_pallet_xcmp_queue::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v3::MigrationToV3, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index ad065ee34774..895890da7dd6 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -108,6 +108,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, // permanent diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 0f0742268618..9d080087d5db 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -108,6 +108,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, pallet_broker::migration::MigrateV0ToV1, // permanent diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index 3cd085fec632..4a57bad01c8c 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -102,6 +102,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 307ab90a4772..22e8fd57d3ca 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -102,6 +102,7 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. pub type Migrations = ( + pallet_collator_selection::migration::v2::MigrationToV2, // permanent pallet_xcm::migration::MigrateToLatestXcmVersion, ); diff --git a/prdoc/pr_4229.prdoc b/prdoc/pr_4229.prdoc new file mode 100644 index 000000000000..05af8e062a32 --- /dev/null +++ b/prdoc/pr_4229.prdoc @@ -0,0 +1,10 @@ +title: "Fix Stuck Collator Funds" + +doc: + - audience: Runtime Dev + description: | + Fixes stuck collator funds by providing a migration that should have been in PR 1340. + +crates: + - name: pallet-collator-selection + bump: patch From ffbce2a817ec2e7c8b7ce49f7ed6794584f19667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Apr 2024 17:37:24 +0200 Subject: [PATCH 43/74] pallet_broker: Let `start_sales` calculate and request the correct core count (#4221) --- prdoc/pr_4221.prdoc | 15 ++++++++ substrate/frame/broker/src/benchmarking.rs | 10 ++++-- .../frame/broker/src/dispatchable_impls.rs | 10 +++++- substrate/frame/broker/src/lib.rs | 23 +++++------- substrate/frame/broker/src/tests.rs | 36 ++++++++++++++----- 5 files changed, 68 insertions(+), 26 deletions(-) create mode 100644 prdoc/pr_4221.prdoc diff --git a/prdoc/pr_4221.prdoc b/prdoc/pr_4221.prdoc new file mode 100644 index 000000000000..e4941cce892a --- /dev/null +++ b/prdoc/pr_4221.prdoc @@ -0,0 +1,15 @@ +title: "pallet_broker::start_sales: Take `extra_cores` and not total cores" + +doc: + - audience: Runtime User + description: | + Change `pallet_broker::start_sales` to take `extra_cores` and not total cores. + It will calculate the total number of cores to offer based on number of + reservations plus number of leases plus `extra_cores`. Internally it will + also notify the relay chain of the required number of cores. 
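To make the new arithmetic concrete, this is roughly what the `start_sales_sets_correct_core_count` test added at the end of this patch exercises; `initial_price` and `schedule` are elided setup values:

```rust
// Three leases plus one reserved (pool) core already on file.
Broker::do_set_lease(1, 100).unwrap();
Broker::do_set_lease(2, 100).unwrap();
Broker::do_set_lease(3, 100).unwrap();
Broker::do_reserve(schedule).unwrap();

// core_count = reservations (1) + leases (3) + extra_cores (5) = 9, and
// `do_start_sales` now calls `do_request_core_count(9)` internally.
Broker::do_start_sales(initial_price, 5).unwrap();
```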
+ + Thus, starting the first sales with `pallet-broker` requires less brain power ;) + +crates: +- name: pallet-broker + bump: minor diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 1fc1c3a101ab..7533e3dc68c4 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -189,11 +189,15 @@ mod benches { let config = new_config_record::(); Configuration::::put(config.clone()); + let mut extra_cores = n; + // Assume Reservations to be filled for worst case - setup_reservations::(T::MaxReservedCores::get()); + setup_reservations::(extra_cores.min(T::MaxReservedCores::get())); + extra_cores = extra_cores.saturating_sub(T::MaxReservedCores::get()); // Assume Leases to be filled for worst case - setup_leases::(T::MaxLeasedCores::get(), 1, 10); + setup_leases::(extra_cores.min(T::MaxLeasedCores::get()), 1, 10); + extra_cores = extra_cores.saturating_sub(T::MaxLeasedCores::get()); let latest_region_begin = Broker::::latest_timeslice_ready_to_commit(&config); @@ -203,7 +207,7 @@ mod benches { T::AdminOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; #[extrinsic_call] - _(origin as T::RuntimeOrigin, initial_price, n.try_into().unwrap()); + _(origin as T::RuntimeOrigin, initial_price, extra_cores.try_into().unwrap()); assert!(SaleInfo::::get().is_some()); assert_last_event::( diff --git a/substrate/frame/broker/src/dispatchable_impls.rs b/substrate/frame/broker/src/dispatchable_impls.rs index cb7393cc9e32..45a0a514c307 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -70,8 +70,16 @@ impl Pallet { Ok(()) } - pub(crate) fn do_start_sales(price: BalanceOf, core_count: CoreIndex) -> DispatchResult { + pub(crate) fn do_start_sales(price: BalanceOf, extra_cores: CoreIndex) -> DispatchResult { let config = Configuration::::get().ok_or(Error::::Uninitialized)?; + + // Determine the core count + let core_count = Leases::::decode_len().unwrap_or(0) as CoreIndex + + Reservations::::decode_len().unwrap_or(0) as CoreIndex + + extra_cores; + + Self::do_request_core_count(core_count)?; + let commit_timeslice = Self::latest_timeslice_ready_to_commit(&config); let status = StatusRecord { core_count, diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 1ef9e59f0186..d59c4c9c6b24 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -559,27 +559,22 @@ pub mod pallet { /// /// - `origin`: Must be Root or pass `AdminOrigin`. /// - `initial_price`: The price of Bulk Coretime in the first sale. - /// - `total_core_count`: This is the total number of cores the relay chain should have - /// after the sale concludes. + /// - `extra_cores`: Number of extra cores that should be requested on top of the cores + /// required for `Reservations` and `Leases`. /// - /// NOTE: This function does not actually request that new core count from the relay chain. - /// You need to make sure to call `request_core_count` afterwards to bring the relay chain - /// in sync. - /// - /// When to call the function depends on the new core count. If it is larger than what it - /// was before, you can call it immediately or even before `start_sales` as non allocated - /// cores will just be `Idle`. If you are actually reducing the number of cores, you should - /// call `request_core_count`, right before the next sale, to avoid shutting down tasks too - /// early. 
+ /// This will call [`Self::request_core_count`] internally to set the correct core count on + /// the relay chain. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::start_sales((*total_core_count).into()))] + #[pallet::weight(T::WeightInfo::start_sales( + T::MaxLeasedCores::get() + T::MaxReservedCores::get() + *extra_cores as u32 + ))] pub fn start_sales( origin: OriginFor, initial_price: BalanceOf, - total_core_count: CoreIndex, + extra_cores: CoreIndex, ) -> DispatchResultWithPostInfo { T::AdminOrigin::ensure_origin_or_root(origin)?; - Self::do_start_sales(initial_price, total_core_count)?; + Self::do_start_sales(initial_price, extra_cores)?; Ok(Pays::No.into()) } diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index c573b6c55a20..f929f0d50dcf 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -329,7 +329,7 @@ fn nft_metadata_works() { fn migration_works() { TestExt::new().endow(1, 1000).execute_with(|| { assert_ok!(Broker::do_set_lease(1000, 8)); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 1)); // Sale is for regions from TS4..7 // Not ending in this sale period. @@ -385,7 +385,7 @@ fn instapool_payouts_work() { TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); - assert_ok!(Broker::do_start_sales(100, 3)); + assert_ok!(Broker::do_start_sales(100, 2)); advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); assert_ok!(Broker::do_pool(region, None, 2, Final)); @@ -411,7 +411,7 @@ fn instapool_partial_core_payouts_work() { TestExt::new().endow(1, 1000).execute_with(|| { let item = ScheduleItem { assignment: Pool, mask: CoreMask::complete() }; assert_ok!(Broker::do_reserve(Schedule::truncate_from(vec![item]))); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 1)); advance_to(2); let region = Broker::do_purchase(1, u64::max_value()).unwrap(); let (region1, region2) = @@ -477,7 +477,7 @@ fn initialize_with_system_paras_works() { ScheduleItem { assignment: Task(4u32), mask: 0x00000_00000_00000_fffff.into() }, ]; assert_ok!(Broker::do_reserve(Schedule::truncate_from(items))); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); advance_to(10); assert_eq!( CoretimeTrace::get(), @@ -510,7 +510,7 @@ fn initialize_with_leased_slots_works() { TestExt::new().execute_with(|| { assert_ok!(Broker::do_set_lease(1000, 6)); assert_ok!(Broker::do_set_lease(1001, 7)); - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); advance_to(18); let end_hint = None; assert_eq!( @@ -925,7 +925,7 @@ fn leases_can_be_renewed() { assert_ok!(Broker::do_set_lease(2001, 9)); assert_eq!(Leases::::get().len(), 1); // Start the sales with only one core for this lease. - assert_ok!(Broker::do_start_sales(100, 1)); + assert_ok!(Broker::do_start_sales(100, 0)); // Advance to sale period 1, we should get an AllowedRenewal for task 2001 for the next // sale. @@ -1018,7 +1018,7 @@ fn short_leases_cannot_be_renewed() { assert_ok!(Broker::do_set_lease(2001, 3)); assert_eq!(Leases::::get().len(), 1); // Start the sales with one core for this lease. - assert_ok!(Broker::do_start_sales(100, 1)); + assert_ok!(Broker::do_start_sales(100, 0)); // The lease is removed. 
assert_eq!(Leases::::get().len(), 0); @@ -1290,7 +1290,7 @@ fn renewal_works_leases_ended_before_start_sales() { )); // This intializes the first sale and the period 0. - assert_ok!(Broker::do_start_sales(100, 2)); + assert_ok!(Broker::do_start_sales(100, 0)); assert_noop!(Broker::do_renew(1, 1), Error::::Unavailable); assert_noop!(Broker::do_renew(1, 0), Error::::Unavailable); @@ -1408,3 +1408,23 @@ fn renewal_works_leases_ended_before_start_sales() { ); }); } + +#[test] +fn start_sales_sets_correct_core_count() { + TestExt::new().endow(1, 1000).execute_with(|| { + advance_to(1); + + Broker::do_set_lease(1, 100).unwrap(); + Broker::do_set_lease(2, 100).unwrap(); + Broker::do_set_lease(3, 100).unwrap(); + Broker::do_reserve(Schedule::truncate_from(vec![ScheduleItem { + assignment: Pool, + mask: CoreMask::complete(), + }])) + .unwrap(); + + Broker::do_start_sales(5, 5).unwrap(); + + System::assert_has_event(Event::::CoreCountRequested { core_count: 9 }.into()); + }) +} From 0a56d071c75856aacb2bf90dd8aaf29399a28e69 Mon Sep 17 00:00:00 2001 From: gupnik Date: Wed, 24 Apr 2024 11:25:54 +0530 Subject: [PATCH 44/74] Adds ability to trigger tasks via unsigned transactions (#4075) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR updates the `validate_unsigned` hook for `frame_system` to allow valid tasks to be submitted as unsigned transactions. It also updates the task example to be able to submit such transactions via an off-chain worker. --------- Co-authored-by: Bastian Köcher --- prdoc/pr_4075.prdoc | 19 +++++++++++ substrate/frame/examples/tasks/src/lib.rs | 38 +++++++++++++++++++-- substrate/frame/examples/tasks/src/mock.rs | 21 ++++++++++++ substrate/frame/examples/tasks/src/tests.rs | 30 ++++++++++++++++ substrate/frame/support/src/lib.rs | 3 ++ substrate/frame/support/src/traits/tasks.rs | 4 +++ substrate/frame/system/src/lib.rs | 16 +++++++-- 7 files changed, 126 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_4075.prdoc diff --git a/prdoc/pr_4075.prdoc b/prdoc/pr_4075.prdoc new file mode 100644 index 000000000000..05e54073b6c7 --- /dev/null +++ b/prdoc/pr_4075.prdoc @@ -0,0 +1,19 @@ +title: Adds ability to trigger tasks via unsigned transactions + +doc: + - audience: Runtime Dev + description: | + This PR updates the `validate_unsigned` hook for `frame_system` to allow valid tasks + to be submitted as unsigned transactions. It also updates the task example to be able to + submit such transactions via an off-chain worker. + + Note that `is_valid` call on a task MUST be cheap with minimal to no storage reads. + Else, it can make the blockchain vulnerable to DoS attacks. + + Further, these tasks will be executed in a random order. + +crates: + - name: frame-system + bump: patch + - name: pallet-example-tasks + bump: minor diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index c65d8095bcf6..1908a235ba15 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,6 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; +use frame_system::offchain::SendTransactionTypes; +#[cfg(feature = "experimental")] +use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. 
pub use pallet::*; @@ -31,10 +34,14 @@ mod benchmarking; pub mod weights; pub use weights::*; +#[cfg(feature = "experimental")] +const LOG_TARGET: &str = "pallet-example-tasks"; + #[frame_support::pallet(dev_mode)] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; #[pallet::error] pub enum Error { @@ -59,9 +66,36 @@ pub mod pallet { } } + #[pallet::hooks] + impl Hooks> for Pallet { + #[cfg(feature = "experimental")] + fn offchain_worker(_block_number: BlockNumberFor) { + if let Some(key) = Numbers::::iter_keys().next() { + // Create a valid task + let task = Task::::AddNumberIntoTotal { i: key }; + let runtime_task = ::RuntimeTask::from(task); + let call = frame_system::Call::::do_task { task: runtime_task.into() }; + + // Submit the task as an unsigned transaction + let res = + SubmitTransaction::>::submit_unsigned_transaction( + call.into(), + ); + match res { + Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), + Err(e) => log::error!(target: LOG_TARGET, "Error submitting task: {:?}", e), + } + } + } + } + #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeTask: frame_support::traits::Task; + pub trait Config: + SendTransactionTypes> + frame_system::Config + { + type RuntimeTask: frame_support::traits::Task + + IsType<::RuntimeTask> + + From>; type WeightInfo: WeightInfo; } diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 76ac9e76bff8..33912bb5269c 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -20,6 +20,7 @@ use crate::{self as tasks_example}; use frame_support::derive_impl; +use sp_runtime::testing::TestXt; pub type AccountId = u32; pub type Balance = u32; @@ -32,12 +33,32 @@ frame_support::construct_runtime!( } ); +pub type Extrinsic = TestXt; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Runtime { type Block = Block; } +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = Extrinsic; +} + impl tasks_example::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); } + +pub fn advance_to(b: u64) { + #[cfg(feature = "experimental")] + use frame_support::traits::Hooks; + while System::block_number() < b { + System::set_block_number(System::block_number() + 1); + #[cfg(feature = "experimental")] + TasksExample::offchain_worker(System::block_number()); + } +} diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs index fc3c69f4aef9..6c8acb0194bd 100644 --- a/substrate/frame/examples/tasks/src/tests.rs +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -19,7 +19,11 @@ #![cfg(test)] use crate::{mock::*, Numbers}; +#[cfg(feature = "experimental")] +use codec::Decode; use frame_support::traits::Task; +#[cfg(feature = "experimental")] +use sp_core::offchain::{testing, OffchainWorkerExt, TransactionPoolExt}; use sp_runtime::BuildStorage; #[cfg(feature = "experimental")] @@ -130,3 +134,29 @@ fn task_execution_fails_for_invalid_task() { ); }); } + +#[cfg(feature = "experimental")] +#[test] +fn task_with_offchain_worker() { + let (offchain, _offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainWorkerExt::new(offchain)); + 
t.register_extension(TransactionPoolExt::new(pool)); + + t.execute_with(|| { + advance_to(1); + assert!(pool_state.read().transactions.is_empty()); + + Numbers::::insert(0, 10); + assert_eq!(crate::Total::::get(), (0, 0)); + + advance_to(2); + + let tx = pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature, None); + }); +} diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 984a7f7537fe..7eddea1259d7 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2465,6 +2465,9 @@ pub mod pallet_macros { /// Finally, the `RuntimeTask` can then used by a script or off-chain worker to create and /// submit such tasks via an extrinsic defined in `frame_system` called `do_task`. /// + /// When submitted as unsigned transactions (for example via an off-chain workder), note + /// that the tasks will be executed in a random order. + /// /// ## Example #[doc = docify::embed!("src/tests/tasks.rs", tasks_example)] /// Now, this can be executed as follows: diff --git a/substrate/frame/support/src/traits/tasks.rs b/substrate/frame/support/src/traits/tasks.rs index 24f3430cf50b..42b837e55970 100644 --- a/substrate/frame/support/src/traits/tasks.rs +++ b/substrate/frame/support/src/traits/tasks.rs @@ -46,6 +46,10 @@ pub trait Task: Sized + FullCodec + TypeInfo + Clone + Debug + PartialEq + Eq { fn iter() -> Self::Enumeration; /// Checks if a particular instance of this `Task` variant is a valid piece of work. + /// + /// This is used to validate tasks for unsigned execution. Hence, it MUST be cheap + /// with minimal to no storage reads. Else, it can make the blockchain vulnerable + /// to DoS attacks. fn is_valid(&self) -> bool; /// Performs the work for this particular `Task` variant. diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 184f27b61ed2..30df4dcfd43e 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -741,9 +741,7 @@ pub mod pallet { #[cfg(feature = "experimental")] #[pallet::call_index(8)] #[pallet::weight(task.weight())] - pub fn do_task(origin: OriginFor, task: T::RuntimeTask) -> DispatchResultWithPostInfo { - ensure_signed(origin)?; - + pub fn do_task(_origin: OriginFor, task: T::RuntimeTask) -> DispatchResultWithPostInfo { if !task.is_valid() { return Err(Error::::InvalidTask.into()) } @@ -1032,6 +1030,18 @@ pub mod pallet { }) } } + #[cfg(feature = "experimental")] + if let Call::do_task { ref task } = call { + if task.is_valid() { + return Ok(ValidTransaction { + priority: u64::max_value(), + requires: Vec::new(), + provides: vec![T::Hashing::hash_of(&task.encode()).as_ref().to_vec()], + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } + } Err(InvalidTransaction::Call.into()) } } From 9a0049d0da59b8b842f64fae441b34dba3408430 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Wed, 24 Apr 2024 09:15:39 +0300 Subject: [PATCH 45/74] Plumbing to increase pvf workers configuration based on chain id (#4252) Part of https://github.com/paritytech/polkadot-sdk/issues/4126 we want to safely increase the execute_workers_max_num gradually from chain to chain and assess if there are any negative impacts. 
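A sketch of the chain-dependent defaulting this enables; `chain_is_test_network` is a placeholder, and the previously hard-coded values (2 execute workers, 1 soft / 2 hard prepare workers) become the fallbacks:

```rust
// Hypothetical defaulting in the service code (not shown in full here):
// the CLI overrides arrive as `Option<usize>` fields on `NewFullParams`.
let execute_workers_max_num = execute_workers_max_num
	.unwrap_or(if chain_is_test_network { 4 } else { 2 });
let prepare_workers_soft_max_num = prepare_workers_soft_max_num.unwrap_or(1);
let prepare_workers_hard_max_num = prepare_workers_hard_max_num.unwrap_or(2);
```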
This PR performs the necessary plumbing to be able to increase it based on the
chain id. It increases the number of execution workers from 2 to 4 on test
networks, but leaves Kusama and Polkadot unchanged until we gather more data.

---------

Signed-off-by: Alexandru Gheorghe
---
 .../src/lib.rs                                 |  3 +++
 polkadot/cli/src/cli.rs                        | 17 ++++++++++++++++
 polkadot/cli/src/command.rs                    |  3 +++
 .../node/core/candidate-validation/src/lib.rs  | 13 ++++++++++++
 .../benches/host_prepare_rococo_runtime.rs     |  3 +++
 polkadot/node/core/pvf/src/host.rs             |  9 ++++++---
 polkadot/node/core/pvf/tests/it/main.rs        |  3 +++
 polkadot/node/service/src/lib.rs               | 20 +++++++++++++++++++
 polkadot/node/test/service/src/lib.rs          |  6 ++++++
 .../adder/collator/src/main.rs                 |  3 +++
 .../undying/collator/src/main.rs               |  3 +++
 prdoc/pr_4252.prdoc                            | 15 ++++++++++++++
 12 files changed, 95 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_4252.prdoc

diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
index 6ea02b2e7c1f..578b942776dc 100644
--- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
+++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs
@@ -312,6 +312,9 @@ fn build_polkadot_full_node(
			overseer_message_channel_capacity_override: None,
			malus_finality_delay: None,
			hwbench,
+			execute_workers_max_num: None,
+			prepare_workers_hard_max_num: None,
+			prepare_workers_soft_max_num: None,
		},
	)?;

diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs
index 3737942e6e53..3e5a6ccdd3c2 100644
--- a/polkadot/cli/src/cli.rs
+++ b/polkadot/cli/src/cli.rs
@@ -131,6 +131,23 @@ pub struct RunCmd {
	#[arg(long, value_name = "PATH")]
	pub workers_path: Option,

+	/// Override the maximum number of pvf execute workers.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+	#[arg(long)]
+	pub execute_workers_max_num: Option<usize>,
+	/// Override the maximum number of pvf workers that can be spawned in the pvf prepare
+	/// pool for tasks with the priority below critical.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+	#[arg(long)]
+	pub prepare_workers_soft_max_num: Option<usize>,
+	/// Override the absolute number of pvf workers that can be spawned in the pvf prepare pool.
+	///
+	/// **Dangerous!** Do not touch unless explicitly advised to.
+	#[arg(long)]
+	pub prepare_workers_hard_max_num: Option<usize>,
	/// TESTING ONLY: disable the version check between nodes and workers.
	#[arg(long, hide = true)]
	pub disable_worker_version_check: bool,
diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs
index 6af93a756388..f5ee538e8cec 100644
--- a/polkadot/cli/src/command.rs
+++ b/polkadot/cli/src/command.rs
@@ -253,6 +253,9 @@ where
					.overseer_channel_capacity_override,
				malus_finality_delay: maybe_malus_finality_delay,
				hwbench,
+				execute_workers_max_num: cli.run.execute_workers_max_num,
+				prepare_workers_hard_max_num: cli.run.prepare_workers_hard_max_num,
+				prepare_workers_soft_max_num: cli.run.prepare_workers_soft_max_num,
			},
		)
		.map(|full| full.task_manager)?;
diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs
index 8663dc43835a..08881dad1961 100644
--- a/polkadot/node/core/candidate-validation/src/lib.rs
+++ b/polkadot/node/core/candidate-validation/src/lib.rs
@@ -100,6 +100,13 @@ pub struct Config {
	pub prep_worker_path: PathBuf,
	/// Path to the execution worker binary
	pub exec_worker_path: PathBuf,
+	/// The maximum number of pvf execution workers.
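+	/// When not explicitly overridden, `polkadot-service` picks a chain-dependent default.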
+	pub pvf_execute_workers_max_num: usize,
+	/// The maximum number of pvf workers that can be spawned in the pvf prepare pool for tasks
+	/// with the priority below critical.
+	pub pvf_prepare_workers_soft_max_num: usize,
+	/// The absolute number of pvf workers that can be spawned in the pvf prepare pool.
+	pub pvf_prepare_workers_hard_max_num: usize,
}

/// The candidate validation subsystem.
@@ -224,6 +231,9 @@ async fn run(
		secure_validator_mode,
		prep_worker_path,
		exec_worker_path,
+		pvf_execute_workers_max_num,
+		pvf_prepare_workers_soft_max_num,
+		pvf_prepare_workers_hard_max_num,
	}: Config,
) -> SubsystemResult<()> {
	let (validation_host, task) = polkadot_node_core_pvf::start(
@@ -233,6 +243,9 @@ async fn run(
			secure_validator_mode,
			prep_worker_path,
			exec_worker_path,
+			pvf_execute_workers_max_num,
+			pvf_prepare_workers_soft_max_num,
+			pvf_prepare_workers_hard_max_num,
		),
		pvf_metrics,
	)
diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
index 2aea21361a3e..97a03e6596d1 100644
--- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
+++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs
@@ -48,6 +48,9 @@ impl TestHost {
			false,
			prepare_worker_path,
			execute_worker_path,
+			2,
+			1,
+			2,
		);
		f(&mut config);
		let (host, task) = start(config, Metrics::default()).await.unwrap();
diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs
index 2d180fc59295..4065598a3ac4 100644
--- a/polkadot/node/core/pvf/src/host.rs
+++ b/polkadot/node/core/pvf/src/host.rs
@@ -188,6 +188,9 @@ impl Config {
		secure_validator_mode: bool,
		prepare_worker_program_path: PathBuf,
		execute_worker_program_path: PathBuf,
+		execute_workers_max_num: usize,
+		prepare_workers_soft_max_num: usize,
+		prepare_workers_hard_max_num: usize,
	) -> Self {
		Self {
			cache_path,
@@ -196,12 +199,12 @@ impl Config {

			prepare_worker_program_path,
			prepare_worker_spawn_timeout: Duration::from_secs(3),
-			prepare_workers_soft_max_num: 1,
-			prepare_workers_hard_max_num: 2,
+			prepare_workers_soft_max_num,
+			prepare_workers_hard_max_num,

			execute_worker_program_path,
			execute_worker_spawn_timeout: Duration::from_secs(3),
-			execute_workers_max_num: 2,
+			execute_workers_max_num,
		}
	}
}
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index 16ef23c69cad..56cc681aff38 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -63,6 +63,9 @@ impl TestHost {
			false,
			prepare_worker_path,
			execute_worker_path,
+			2,
+			1,
+			2,
		);
		f(&mut config);
		let (host, task) = start(config, Metrics::default()).await.unwrap();
diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs
index 22231c84b1d9..e5c29172099b 100644
--- a/polkadot/node/service/src/lib.rs
+++ b/polkadot/node/service/src/lib.rs
@@ -643,6 +643,13 @@ pub struct NewFullParams {
	pub workers_path: Option,
	/// Optional custom names for the prepare and execute workers.
	pub workers_names: Option<(String, String)>,
+	/// The optional maximum number of pvf execute workers.
+	pub execute_workers_max_num: Option<usize>,
+	/// An optional maximum number of pvf workers that can be spawned in the pvf prepare pool for
+	/// tasks with the priority below critical.
+	pub prepare_workers_soft_max_num: Option<usize>,
+	/// An optional absolute number of pvf workers that can be spawned in the pvf prepare pool.
+	pub prepare_workers_hard_max_num: Option<usize>,
	pub overseer_gen: OverseerGenerator,
	pub overseer_message_channel_capacity_override: Option,
	#[allow(dead_code)]
@@ -738,6 +745,9 @@ pub fn new_full<
		overseer_message_channel_capacity_override,
		malus_finality_delay: _malus_finality_delay,
		hwbench,
+		execute_workers_max_num,
+		prepare_workers_soft_max_num,
+		prepare_workers_hard_max_num,
	}: NewFullParams,
) -> Result {
	use polkadot_node_network_protocol::request_response::IncomingRequest;
@@ -943,6 +953,16 @@ pub fn new_full<
				secure_validator_mode,
				prep_worker_path,
				exec_worker_path,
+				pvf_execute_workers_max_num: execute_workers_max_num.unwrap_or_else(
+					|| match config.chain_spec.identify_chain() {
+						// The intention is to use this logic to gradually increase this
+						// configuration from 2 to 4, chain by chain, until it reaches the
+						// production chains.
+						Chain::Polkadot | Chain::Kusama => 2,
+						Chain::Rococo | Chain::Westend | Chain::Unknown => 4,
+					},
+				),
+				pvf_prepare_workers_soft_max_num: prepare_workers_soft_max_num.unwrap_or(1),
+				pvf_prepare_workers_hard_max_num: prepare_workers_hard_max_num.unwrap_or(2),
			})
		} else {
			None
diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs
index d313c1933348..87fbc7c20f31 100644
--- a/polkadot/node/test/service/src/lib.rs
+++ b/polkadot/node/test/service/src/lib.rs
@@ -97,6 +97,9 @@ pub fn new_full(
				overseer_message_channel_capacity_override: None,
				malus_finality_delay: None,
				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
			},
		),
		sc_network::config::NetworkBackendType::Litep2p =>
@@ -116,6 +119,9 @@ pub fn new_full(
				overseer_message_channel_capacity_override: None,
				malus_finality_delay: None,
				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
			},
		),
	}
diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs
index fec90fc41cdb..e8588274df27 100644
--- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs
+++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs
@@ -95,6 +95,9 @@ fn main() -> Result<()> {
				overseer_message_channel_capacity_override: None,
				malus_finality_delay: None,
				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
			},
		)
		.map_err(|e| e.to_string())?;
diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs
index 45f21e7b8596..7198a831a477 100644
--- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs
+++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs
@@ -97,6 +97,9 @@ fn main() -> Result<()> {
				overseer_message_channel_capacity_override: None,
				malus_finality_delay: None,
				hwbench: None,
+				execute_workers_max_num: None,
+				prepare_workers_hard_max_num: None,
+				prepare_workers_soft_max_num: None,
			},
		)
		.map_err(|e| e.to_string())?;
diff --git a/prdoc/pr_4252.prdoc b/prdoc/pr_4252.prdoc
new file mode 100644
index 000000000000..22987b46845d
--- /dev/null
+++ b/prdoc/pr_4252.prdoc
@@ -0,0 +1,15 @@
+title: "Add logic to increase pvf worker based on chain"
+
+doc:
+  - audience: Node Operator
+    description: |
+      New logic and CLI parameters were added to allow increasing the number of pvf
+      workers based on the chain-id.
+
+crates:
+  - name: polkadot-node-core-candidate-validation
+    bump: minor
+  - name: polkadot-cli
+    bump: minor
+  - name: polkadot-service
+    bump: minor

From e0584a153df63ff138d12764085422ed06de548a Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Wed, 24 Apr 2024 11:44:42 +0300
Subject: [PATCH 46/74] pallet-xcm::transfer_assets_using_type() supports
 custom actions on destination (#4260)

Change `transfer_assets_using_type()` to not assume `DepositAsset` as the
intended use of the assets on the destination. Instead, it provides the caller
with the ability to specify custom XCM to be executed on the `dest` chain as
the last step of the transfer, thus allowing custom use cases for the
transferred assets. E.g. some are used/swapped/etc there, while some are sent
further to yet another chain.

Note: this is a follow-up on
https://github.com/paritytech/polkadot-sdk/pull/3695, bringing in an API change
for `transfer_assets_using_type()`. This is ok as the previous version has not
yet been released. Thus, its first release will include the new API proposed by
this PR.

This allows use cases such as:
https://forum.polkadot.network/t/managing-sas-on-multiple-reserve-chains-for-same-asset/7538/4

BTW: all this pallet-xcm asset transfers code will be massively reduced once we
have https://github.com/paritytech/xcm-format/pull/54

---------

Signed-off-by: Adrian Catangiu
---
 .../tests/assets/asset-hub-rococo/src/lib.rs  |   5 +-
 ...ssets_transfers.rs => hybrid_transfers.rs} | 203 +++++++++++++++++-
 .../assets/asset-hub-rococo/src/tests/mod.rs  |   2 +-
 .../src/tests/reserve_transfer.rs             |   2 +-
 .../tests/assets/asset-hub-westend/src/lib.rs |   5 +-
 ...ssets_transfers.rs => hybrid_transfers.rs} | 203 +++++++++++++++++-
 .../assets/asset-hub-westend/src/tests/mod.rs |   2 +-
 .../src/tests/reserve_transfer.rs             |   2 +-
 .../src/tests/asset_transfers.rs              |   8 +-
 .../src/tests/asset_transfers.rs              |   8 +-
 polkadot/xcm/pallet-xcm/src/lib.rs            | 196 +++++++++--------
 prdoc/pr_3695.prdoc                           |   9 +-
 12 files changed, 530 insertions(+), 115 deletions(-)
 rename cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/{foreign_assets_transfers.rs => hybrid_transfers.rs} (76%)
 rename cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/{foreign_assets_transfers.rs => hybrid_transfers.rs} (76%)

diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
index 322c6cf1f228..2bd388bee400 100644
--- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
@@ -70,7 +70,9 @@ mod imports {
		LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub,
		LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub,
	};
-	pub use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig;
+	pub use rococo_runtime::xcm_config::{
+		UniversalLocation as RococoUniversalLocation, XcmConfig as RococoXcmConfig,
+	};

	pub const ASSET_ID: u32 = 3;
	pub const ASSET_MIN_BALANCE: u128 = 1000;
@@ -83,6 +85,7 @@ mod imports {
	pub type ParaToSystemParaTest = Test;
	pub type ParaToParaThroughRelayTest = Test;
	pub type ParaToParaThroughAHTest = Test;
+	pub type RelayToParaThroughAHTest = Test;
}

#[cfg(test)]
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs similarity index 76% rename from cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs index 6bdf89e6f277..edaaa998a9ca 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/foreign_assets_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/hybrid_transfers.rs @@ -54,14 +54,18 @@ fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::LocalReserve), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -69,14 +73,18 @@ fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::DestinationReserve), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -85,14 +93,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubRococo::para_id()); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), bx!(fee.id.into()), bx!(TransferType::RemoteReserve(asset_hub_location.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -100,14 +112,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { 
+		assets: Wild(AllCounted(t.args.assets.len() as u32)),
+		beneficiary: t.args.beneficiary,
+	}]);
+	::PolkadotXcm::transfer_assets_using_type_and_then(
		t.signed_origin,
		bx!(t.args.dest.into()),
-		bx!(t.args.beneficiary.into()),
		bx!(t.args.assets.into()),
		bx!(TransferType::Teleport),
		bx!(fee.id.into()),
		bx!(TransferType::DestinationReserve),
+		bx!(VersionedXcm::from(custom_xcm_on_dest)),
		t.args.weight_limit,
	)
}
@@ -115,14 +131,18 @@ fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> Dispatc
fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult {
	let fee_idx = t.args.fee_asset_item as usize;
	let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap();
-	::PolkadotXcm::transfer_assets_using_type(
+	let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset {
+		assets: Wild(AllCounted(t.args.assets.len() as u32)),
+		beneficiary: t.args.beneficiary,
+	}]);
+	::PolkadotXcm::transfer_assets_using_type_and_then(
		t.signed_origin,
		bx!(t.args.dest.into()),
-		bx!(t.args.beneficiary.into()),
		bx!(t.args.assets.into()),
		bx!(TransferType::Teleport),
		bx!(fee.id.into()),
		bx!(TransferType::LocalReserve),
+		bx!(VersionedXcm::from(custom_xcm_on_dest)),
		t.args.weight_limit,
	)
}
@@ -626,3 +646,166 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici
		asset_hub_to_para_teleport_foreign_assets,
	);
}
+
+// ===============================================================
+// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ====
+// ===============================================================
+/// Transfers of the native asset from Relay to Parachain (using AssetHub reserve). Parachains want
+/// to avoid managing SAs on all system chains, thus want all their DOT-in-reserve to be held in
+/// their Sovereign Account on Asset Hub.
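+// NOTE: illustrative sketch only, with hypothetical `onward_dest` and `remote_beneficiary`
+// locations (expressed in `dest` context). `custom_xcm_on_dest` is not limited to a local
+// `DepositAsset`; mirroring the `xcm_on_hop` pattern used below, it could forward the assets
+// even further, e.g.:
+//
+//   let custom_xcm_on_dest = Xcm::<()>(vec![DepositReserveAsset {
+//       assets: Wild(AllCounted(1)),
+//       dest: onward_dest,
+//       xcm: Xcm(vec![DepositAsset {
+//           assets: Wild(AllCounted(1)),
+//           beneficiary: remote_beneficiary,
+//       }]),
+//   }]);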
+#[test] +fn transfer_native_asset_from_relay_to_para_through_asset_hub() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let sender = RococoSender::get(); + let amount_to_send: Balance = ROCOCO_ED * 1000; + + // Init values for Parachain + let relay_native_asset_location = RelayLocation::get(); + let receiver = PenpalAReceiver::get(); + + // Init Test + let test_args = TestContext { + sender, + receiver: receiver.clone(), + args: TestArgs::new_relay(destination.clone(), receiver.clone(), amount_to_send), + }; + let mut test = RelayToParaThroughAHTest::new(test_args); + + let sov_penpal_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + // Query initial balances + let sender_balance_before = test.sender.balance; + let sov_penpal_on_ah_before = AssetHubRococo::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah.clone()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location.clone(), &receiver) + }); + + fn relay_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + Rococo::assert_xcm_pallet_attempted_complete(None); + assert_expected_events!( + Rococo, + vec![ + // Amount to teleport is withdrawn from Sender + RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + // Amount to teleport is deposited in Relay's `CheckAccount` + RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { + who: *who == ::XcmPallet::check_account(), + amount: *amount == t.args.amount, + }, + ] + ); + } + fn asset_hub_assertions(_: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let sov_penpal_on_ah = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + assert_expected_events!( + AssetHubRococo, + vec![ + // Deposited to receiver parachain SA + RuntimeEvent::Balances( + pallet_balances::Event::Minted { who, .. } + ) => { + who: *who == sov_penpal_on_ah, + }, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + } + fn penpal_assertions(t: RelayToParaThroughAHTest) { + type RuntimeEvent = ::RuntimeEvent; + let expected_id = + t.args.assets.into_inner().first().unwrap().id.0.clone().try_into().unwrap(); + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } + fn transfer_assets_dispatchable(t: RelayToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = Rococo::child_location_of(AssetHubRococo::para_id()); + let context = RococoUniversalLocation::get(); + + // reanchor fees to the view of destination (Penpal) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Penpal) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub, forward assets to Penpal + let xcm_on_hop = Xcm::<()>(vec![DepositReserveAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a teleport, from there a local-reserve-transfer to final dest + ::XcmPallet::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + + // Set assertions and dispatchables + test.set_assertion::(relay_assertions); + test.set_assertion::(asset_hub_assertions); + test.set_assertion::(penpal_assertions); + test.set_dispatchable::(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sov_penpal_on_ah_after = AssetHubRococo::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is increased + assert!(sov_penpal_on_ah_after > sov_penpal_on_ah_before); + // Receiver's asset balance is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs index 346af3082384..138ce419757b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod foreign_assets_transfers; +mod hybrid_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 5aef70f5cbfc..8b9fedcd4947 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -574,7 +574,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender = RococoSender::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; - // Init values fot Parachain + // Init values for Parachain let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index e687251c14f9..1c4a0ef4c8d2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -74,7 +74,9 @@ mod imports { LocalReservableFromAssetHub as PenpalLocalReservableFromAssetHub, LocalTeleportableToAssetHub as PenpalLocalTeleportableToAssetHub, }; - pub use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; + pub use westend_runtime::xcm_config::{ + UniversalLocation as WestendUniversalLocation, XcmConfig as WestendXcmConfig, + }; pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; @@ -87,6 +89,7 @@ mod imports { pub type ParaToSystemParaTest = Test; pub type ParaToParaThroughRelayTest = Test; pub type ParaToParaThroughAHTest = Test; + pub type RelayToParaThroughAHTest = Test; } #[cfg(test)] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs similarity index 76% rename from cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs index 8cfda37c84c9..d39c72c7c5f0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/foreign_assets_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/hybrid_transfers.rs @@ -54,14 +54,18 @@ fn para_to_para_assethub_hop_assertions(t: ParaToParaThroughAHTest) { fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::LocalReserve), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } 
@@ -69,14 +73,18 @@ fn ah_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { fn para_to_ah_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::DestinationReserve), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -85,14 +93,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), bx!(fee.id.into()), bx!(TransferType::RemoteReserve(asset_hub_location.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -100,14 +112,18 @@ fn para_to_para_transfer_assets_through_ah(t: ParaToParaThroughAHTest) -> Dispat fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::DestinationReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -115,14 +131,18 @@ fn para_to_asset_hub_teleport_foreign_assets(t: ParaToSystemParaTest) -> Dispatc fn asset_hub_to_para_teleport_foreign_assets(t: SystemParaToParaTest) -> DispatchResult { let fee_idx = t.args.fee_asset_item as usize; let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); - ::PolkadotXcm::transfer_assets_using_type( + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }]); + ::PolkadotXcm::transfer_assets_using_type_and_then( t.signed_origin, bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), bx!(TransferType::Teleport), bx!(fee.id.into()), bx!(TransferType::LocalReserve), + bx!(VersionedXcm::from(custom_xcm_on_dest)), t.args.weight_limit, ) } @@ -627,3 +647,166 @@ fn bidirectional_teleport_foreign_asset_between_para_and_asset_hub_using_explici asset_hub_to_para_teleport_foreign_assets, ); } + +// 
===============================================================
+// ===== Transfer - Native Asset - Relay->AssetHub->Parachain ====
+// ===============================================================
+/// Transfers of the native asset from Relay to Parachain (using AssetHub reserve). Parachains want
+/// to avoid managing SAs on all system chains, thus want all their DOT-in-reserve to be held in
+/// their Sovereign Account on Asset Hub.
+#[test]
+fn transfer_native_asset_from_relay_to_para_through_asset_hub() {
+	// Init values for Relay
+	let destination = Westend::child_location_of(PenpalA::para_id());
+	let sender = WestendSender::get();
+	let amount_to_send: Balance = WESTEND_ED * 1000;
+
+	// Init values for Parachain
+	let relay_native_asset_location = RelayLocation::get();
+	let receiver = PenpalAReceiver::get();
+
+	// Init Test
+	let test_args = TestContext {
+		sender,
+		receiver: receiver.clone(),
+		args: TestArgs::new_relay(destination.clone(), receiver.clone(), amount_to_send),
+	};
+	let mut test = RelayToParaThroughAHTest::new(test_args);
+
+	let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of(
+		AssetHubWestend::sibling_location_of(PenpalA::para_id()),
+	);
+	// Query initial balances
+	let sender_balance_before = test.sender.balance;
+	let sov_penpal_on_ah_before = AssetHubWestend::execute_with(|| {
+		::Balances::free_balance(sov_penpal_on_ah.clone())
+	});
+	let receiver_assets_before = PenpalA::execute_with(|| {
+		type ForeignAssets = ::ForeignAssets;
+		>::balance(relay_native_asset_location.clone(), &receiver)
+	});
+
+	fn relay_assertions(t: RelayToParaThroughAHTest) {
+		type RuntimeEvent = ::RuntimeEvent;
+		Westend::assert_xcm_pallet_attempted_complete(None);
+		assert_expected_events!(
+			Westend,
+			vec![
+				// Amount to teleport is withdrawn from Sender
+				RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => {
+					who: *who == t.sender.account_id,
+					amount: *amount == t.args.amount,
+				},
+				// Amount to teleport is deposited in Relay's `CheckAccount`
+				RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => {
+					who: *who == ::XcmPallet::check_account(),
+					amount: *amount == t.args.amount,
+				},
+			]
+		);
+	}
+	fn asset_hub_assertions(_: RelayToParaThroughAHTest) {
+		type RuntimeEvent = ::RuntimeEvent;
+		let sov_penpal_on_ah = AssetHubWestend::sovereign_account_id_of(
+			AssetHubWestend::sibling_location_of(PenpalA::para_id()),
+		);
+		assert_expected_events!(
+			AssetHubWestend,
+			vec![
+				// Deposited to receiver parachain SA
+				RuntimeEvent::Balances(
+					pallet_balances::Event::Minted { who, .. }
+				) => {
+					who: *who == sov_penpal_on_ah,
+				},
+				RuntimeEvent::MessageQueue(
+					pallet_message_queue::Event::Processed { success: true, .. }
+				) => {},
+			]
+		);
+	}
+	fn penpal_assertions(t: RelayToParaThroughAHTest) {
+		type RuntimeEvent = ::RuntimeEvent;
+		let expected_id =
+			t.args.assets.into_inner().first().unwrap().id.0.clone().try_into().unwrap();
+		assert_expected_events!(
+			PenpalA,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, ..
}) => { + asset_id: *asset_id == expected_id, + owner: *owner == t.receiver.account_id, + }, + ] + ); + } + fn transfer_assets_dispatchable(t: RelayToParaThroughAHTest) -> DispatchResult { + let fee_idx = t.args.fee_asset_item as usize; + let fee: Asset = t.args.assets.inner().get(fee_idx).cloned().unwrap(); + let asset_hub_location = Westend::child_location_of(AssetHubWestend::para_id()); + let context = WestendUniversalLocation::get(); + + // reanchor fees to the view of destination (Penpal) + let mut remote_fees = fee.clone().reanchored(&t.args.dest, &context).unwrap(); + if let Fungible(ref mut amount) = remote_fees.fun { + // we already spent some fees along the way, just use half of what we started with + *amount = *amount / 2; + } + let xcm_on_final_dest = Xcm::<()>(vec![ + BuyExecution { fees: remote_fees, weight_limit: t.args.weight_limit.clone() }, + DepositAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + beneficiary: t.args.beneficiary, + }, + ]); + + // reanchor final dest (Penpal) to the view of hop (Asset Hub) + let mut dest = t.args.dest.clone(); + dest.reanchor(&asset_hub_location, &context).unwrap(); + // on Asset Hub, forward assets to Penpal + let xcm_on_hop = Xcm::<()>(vec![DepositReserveAsset { + assets: Wild(AllCounted(t.args.assets.len() as u32)), + dest, + xcm: xcm_on_final_dest, + }]); + + // First leg is a teleport, from there a local-reserve-transfer to final dest + ::XcmPallet::transfer_assets_using_type_and_then( + t.signed_origin, + bx!(asset_hub_location.into()), + bx!(t.args.assets.into()), + bx!(TransferType::Teleport), + bx!(fee.id.into()), + bx!(TransferType::Teleport), + bx!(VersionedXcm::from(xcm_on_hop)), + t.args.weight_limit, + ) + } + + // Set assertions and dispatchables + test.set_assertion::(relay_assertions); + test.set_assertion::(asset_hub_assertions); + test.set_assertion::(penpal_assertions); + test.set_dispatchable::(transfer_assets_dispatchable); + test.assert(); + + // Query final balances + let sender_balance_after = test.sender.balance; + let sov_penpal_on_ah_after = AssetHubWestend::execute_with(|| { + ::Balances::free_balance(sov_penpal_on_ah) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type ForeignAssets = ::ForeignAssets; + >::balance(relay_native_asset_location, &receiver) + }); + + // Sender's balance is reduced by amount sent plus delivery fees + assert!(sender_balance_after < sender_balance_before - amount_to_send); + // SA on AH balance is increased + assert!(sov_penpal_on_ah_after > sov_penpal_on_ah_before); + // Receiver's asset balance is increased + assert!(receiver_assets_after > receiver_assets_before); + // Receiver's asset balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_assets_after < receiver_assets_before + amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs index e463e21e9e52..bf013697b4c7 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs @@ -14,7 +14,7 @@ // limitations under the License. 
mod fellowship_treasury; -mod foreign_assets_transfers; +mod hybrid_transfers; mod reserve_transfer; mod send; mod set_xcm_versions; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index df01eb0d48ad..65d013a0eec4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -574,7 +574,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let sender = WestendSender::get(); let amount_to_send: Balance = WESTEND_ED * 1000; - // Init values fot Parachain + // Init values for Parachain let relay_native_asset_location = RelayLocation::get(); let receiver = PenpalAReceiver::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 69d625be2804..87fb70e4de23 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -60,15 +60,19 @@ fn send_asset_from_penpal_rococo_through_local_asset_hub_to_westend_asset_hub( AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); let assets: Assets = (id.clone(), transfer_amount).into(); let fees_id: AssetId = id.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); - ::PolkadotXcm::transfer_assets_using_type( + ::PolkadotXcm::transfer_assets_using_type_and_then( signed_origin, bx!(destination.into()), - bx!(beneficiary.into()), bx!(assets.clone().into()), bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), bx!(fees_id.into()), bx!(TransferType::RemoteReserve(local_asset_hub.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), WeightLimit::Unlimited, ) })); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 3a8ce7d43f3e..597e77d9049c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -59,15 +59,19 @@ fn send_asset_from_penpal_westend_through_local_asset_hub_to_rococo_asset_hub( AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); let assets: Assets = (id.clone(), transfer_amount).into(); let fees_id: AssetId = id.into(); + let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { + assets: Wild(AllCounted(assets.len() as u32)), + beneficiary, + }]); - ::PolkadotXcm::transfer_assets_using_type( + ::PolkadotXcm::transfer_assets_using_type_and_then( signed_origin, bx!(destination.into()), - bx!(beneficiary.into()), bx!(assets.into()), bx!(TransferType::RemoteReserve(local_asset_hub.clone().into())), bx!(fees_id.into()), bx!(TransferType::RemoteReserve(local_asset_hub.into())), + bx!(VersionedXcm::from(custom_xcm_on_dest)), 
WeightLimit::Unlimited,
		)
	}));
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index 698ec6998b49..f6c301d5b04e 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -45,7 +45,7 @@ use sp_runtime::{
		AccountIdConversion, BadOrigin, BlakeTwo256, BlockNumberProvider, Dispatchable, Hash,
		Saturating, Zero,
	},
-	RuntimeDebug,
+	Either, RuntimeDebug,
};
use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
use xcm::{latest::QueryResponseInfo, prelude::*};
@@ -1311,7 +1311,7 @@ pub mod pallet {
			Self::do_transfer_assets(
				origin,
				dest,
-				beneficiary,
+				Either::Left(beneficiary),
				assets,
				assets_transfer_type,
				fee_asset_item,
@@ -1421,50 +1421,60 @@ pub mod pallet {
		/// - `TransferType::Teleport`: burn local assets and forward XCM to `dest` chain to
		///   mint/teleport assets and deposit them to `beneficiary`.
		///
-		/// Fee payment on the source, destination and all intermediary hops, is specified through
-		/// `fees_id`, but make sure enough of the specified `fees_id` asset is included in the
-		/// given list of `assets`. `fees_id` should be enough to pay for `weight_limit`. If more
-		/// weight is needed than `weight_limit`, then the operation will fail and the sent assets
-		/// may be at risk.
+		/// On the destination chain, as well as any intermediary hops, `BuyExecution` is used to
+		/// buy execution using transferred `assets` identified by `remote_fees_id`.
+		/// Make sure enough of the specified `remote_fees_id` asset is included in the given list
+		/// of `assets`. `remote_fees_id` should be enough to pay for `weight_limit`. If more weight
+		/// is needed than `weight_limit`, then the operation will fail and the sent assets may be
+		/// at risk.
+		///
+		/// `remote_fees_id` may use a different transfer type than the rest of `assets` and can be
+		/// specified through `fees_transfer_type`.
		///
-		/// `fees_id` may use different transfer type than rest of `assets` and can be specified
-		/// through `fees_transfer_type`.
+		/// The caller needs to specify what should happen to the transferred assets once they reach
+		/// the `dest` chain. This is done through the `custom_xcm_on_dest` parameter, which
+		/// contains the instructions to execute on `dest` as a final step.
+		/// This is usually as simple as:
+		/// `Xcm(vec![DepositAsset { assets: Wild(AllCounted(assets.len())), beneficiary }])`,
+		/// but could be something more exotic like sending the `assets` even further.
		///
		/// - `origin`: Must be capable of withdrawing the `assets` and executing XCM.
		/// - `dest`: Destination context for the assets. Will typically be `[Parent,
		///   Parachain(..)]` to send from parachain to parachain, or `[Parachain(..)]` to send from
		///   relay to parachain, or `(parents: 2, (GlobalConsensus(..), ..))` to send from
		///   parachain across a bridge to another ecosystem destination.
-		/// - `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will
-		///   generally be an `AccountId32` value.
		/// - `assets`: The assets to be withdrawn. This should include the assets used to pay the
		///   fee on the `dest` (and possibly reserve) chains.
		/// - `assets_transfer_type`: The XCM `TransferType` used to transfer the `assets`.
-		/// - `fees_id`: One of the included `assets` to be be used to pay fees.
+		/// - `remote_fees_id`: One of the included `assets` to be used to pay fees.
		/// - `fees_transfer_type`: The XCM `TransferType` used to transfer the `fees` assets.
+ /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the + /// transfer, which also determines what happens to the assets on the destination chain. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. #[pallet::call_index(15)] #[pallet::weight(T::WeightInfo::transfer_assets())] - pub fn transfer_assets_using_type( + pub fn transfer_assets_using_type_and_then( origin: OriginFor, dest: Box, - beneficiary: Box, assets: Box, assets_transfer_type: Box, - fees_id: Box, + remote_fees_id: Box, fees_transfer_type: Box, + custom_xcm_on_dest: Box>, weight_limit: WeightLimit, ) -> DispatchResult { let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; let dest: Location = (*dest).try_into().map_err(|()| Error::::BadVersion)?; - let beneficiary: Location = - (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: Assets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; - let fees_id: AssetId = (*fees_id).try_into().map_err(|()| Error::::BadVersion)?; + let fees_id: AssetId = + (*remote_fees_id).try_into().map_err(|()| Error::::BadVersion)?; + let remote_xcm: Xcm<()> = + (*custom_xcm_on_dest).try_into().map_err(|()| Error::::BadVersion)?; log::debug!( - target: "xcm::pallet_xcm::transfer_assets_using_type", - "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?} through {:?}, fees-id {:?} through {:?}", - origin_location, dest, beneficiary, assets, assets_transfer_type, fees_id, fees_transfer_type, + target: "xcm::pallet_xcm::transfer_assets_using_type_and_then", + "origin {origin_location:?}, dest {dest:?}, assets {assets:?} through {assets_transfer_type:?}, \ + remote_fees_id {fees_id:?} through {fees_transfer_type:?}, \ + custom_xcm_on_dest {remote_xcm:?}, weight-limit {weight_limit:?}", ); let assets = assets.into_inner(); @@ -1475,7 +1485,7 @@ pub mod pallet { Self::do_transfer_assets( origin_location, dest, - beneficiary, + Either::Right(remote_xcm), assets, *assets_transfer_type, fee_asset_index, @@ -1650,7 +1660,7 @@ impl Pallet { let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( origin.clone(), dest.clone(), - beneficiary, + Either::Left(beneficiary), assets, assets_transfer_type, FeesHandling::Batched { fees }, @@ -1692,7 +1702,7 @@ impl Pallet { let (local_xcm, remote_xcm) = Self::build_xcm_transfer_type( origin_location.clone(), dest.clone(), - beneficiary, + Either::Left(beneficiary), assets, TransferType::Teleport, FeesHandling::Batched { fees }, @@ -1704,7 +1714,7 @@ impl Pallet { fn do_transfer_assets( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, mut assets: Vec, assets_transfer_type: TransferType, fee_asset_index: usize, @@ -1770,7 +1780,7 @@ impl Pallet { fn build_xcm_transfer_type( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, transfer_type: TransferType, fees: FeesHandling, @@ -1782,57 +1792,51 @@ impl Pallet { fees_handling {:?}, weight_limit: {:?}", origin, dest, beneficiary, assets, transfer_type, fees, weight_limit, ); - Ok(match transfer_type { - TransferType::LocalReserve => { - let (local, remote) = Self::local_reserve_transfer_programs( - origin.clone(), - dest.clone(), - beneficiary, - assets, - fees, - weight_limit, - )?; - (local, Some(remote)) - }, - TransferType::DestinationReserve => { - let (local, remote) = Self::destination_reserve_transfer_programs( - origin.clone(), - dest.clone(), - beneficiary, - assets, - fees, - weight_limit, - )?; - (local, Some(remote)) - 
}, + match transfer_type { + TransferType::LocalReserve => Self::local_reserve_transfer_programs( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), + TransferType::DestinationReserve => Self::destination_reserve_transfer_programs( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), TransferType::RemoteReserve(reserve) => { let fees = match fees { FeesHandling::Batched { fees } => fees, _ => return Err(Error::::InvalidAssetUnsupportedReserve.into()), }; - let local = Self::remote_reserve_transfer_program( + Self::remote_reserve_transfer_program( origin.clone(), reserve.try_into().map_err(|()| Error::::BadVersion)?, - dest.clone(), beneficiary, - assets, - fees, - weight_limit, - )?; - (local, None) - }, - TransferType::Teleport => { - let (local, remote) = Self::teleport_assets_program( - origin.clone(), dest.clone(), - beneficiary, assets, fees, weight_limit, - )?; - (local, Some(remote)) + ) + .map(|local| (local, None)) }, - }) + TransferType::Teleport => Self::teleport_assets_program( + origin.clone(), + dest.clone(), + beneficiary, + assets, + fees, + weight_limit, + ) + .map(|(local, remote)| (local, Some(remote))), + } } fn execute_xcm_transfer( @@ -1947,7 +1951,7 @@ impl Pallet { fn local_reserve_transfer_programs( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -1980,10 +1984,16 @@ impl Pallet { ]); // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } @@ -2022,7 +2032,7 @@ impl Pallet { fn destination_reserve_transfer_programs( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -2058,10 +2068,15 @@ impl Pallet { // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. 
+ let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } @@ -2070,8 +2085,8 @@ impl Pallet { fn remote_reserve_transfer_program( origin: Location, reserve: Location, + beneficiary: Either>, dest: Location, - beneficiary: Location, assets: Vec, fees: Asset, weight_limit: WeightLimit, @@ -2096,10 +2111,17 @@ impl Pallet { // identifies `dest` as seen by `reserve` let dest = dest.reanchored(&reserve, &context).map_err(|_| Error::::CannotReanchor)?; // xcm to be executed at dest - let xcm_on_dest = Xcm(vec![ - BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); + let mut xcm_on_dest = + Xcm(vec![BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() }]); + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_xcm_on_dest = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_xcm_on_dest.into_iter()); // xcm to be executed on reserve let xcm_on_reserve = Xcm(vec![ BuyExecution { fees: reserve_fees, weight_limit }, @@ -2171,7 +2193,7 @@ impl Pallet { fn teleport_assets_program( origin: Location, dest: Location, - beneficiary: Location, + beneficiary: Either>, assets: Vec, fees: FeesHandling, weight_limit: WeightLimit, @@ -2231,10 +2253,16 @@ impl Pallet { ]); // handle fees Self::add_fees_to_xcm(dest, fees, weight_limit, &mut local_execute_xcm, &mut xcm_on_dest)?; - // deposit all remaining assets in holding to `beneficiary` location - xcm_on_dest - .inner_mut() - .push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + // Use custom XCM on remote chain, or just default to depositing everything to beneficiary. + let custom_remote_xcm = match beneficiary { + Either::Right(custom_xcm) => custom_xcm, + Either::Left(beneficiary) => { + // deposit all remaining assets in holding to `beneficiary` location + Xcm(vec![DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }]) + }, + }; + xcm_on_dest.0.extend(custom_remote_xcm.into_iter()); Ok((local_execute_xcm, xcm_on_dest)) } diff --git a/prdoc/pr_3695.prdoc b/prdoc/pr_3695.prdoc index 2c2c2b2e6917..cc54fb240cd0 100644 --- a/prdoc/pr_3695.prdoc +++ b/prdoc/pr_3695.prdoc @@ -6,7 +6,7 @@ title: "pallet-xcm: add new extrinsic for asset transfers using explicit reserve doc: - audience: Runtime User description: | - pallet-xcm has a new extrinsic `transfer_assets_using_type` for transferring + pallet-xcm has a new extrinsic `transfer_assets_using_type_and_then` for transferring assets from local chain to destination chain using an explicit XCM transfer types for transferring the assets and the fees: - `TransferType::LocalReserve`: transfer assets to sovereign account of destination @@ -33,6 +33,13 @@ doc: Same when transferring bridged assets back across the bridge, the local bridging parachain must be used as the explicit reserve location. 
+      The new method takes a `custom_xcm_on_dest` parameter allowing the caller to specify
+      what should happen to the transferred assets once they reach
+      the `dest` chain. The `custom_xcm_on_dest` parameter should contain the instructions
+      to execute on `dest` as a final step. This is usually as simple as:
+      `Xcm(vec![DepositAsset { assets: Wild(AllCounted(assets.len())), beneficiary }])`,
+      but could be something more exotic like sending the `assets` even further.
+
crates:
  - name: pallet-xcm
    bump: minor

From c594b10a803e218f63c1bd97d2b27454efb4e852 Mon Sep 17 00:00:00 2001
From: Alexander Kalankhodzhaev
Date: Wed, 24 Apr 2024 16:30:47 +0700
Subject: [PATCH 47/74] Remove unnecessary cloning (#4263)

It seems that Externalities already [returns a vector](https://github.com/paritytech/polkadot-sdk/blob/ffbce2a817ec2e7c8b7ce49f7ed6794584f19667/substrate/primitives/externalities/src/lib.rs#L86),
so calling `to_vec` on a vector just results in unneeded copying.

Co-authored-by: Liam Aharon
---
 substrate/primitives/io/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs
index ec32b7290330..c8675a9a90bd 100644
--- a/substrate/primitives/io/src/lib.rs
+++ b/substrate/primitives/io/src/lib.rs
@@ -182,7 +182,7 @@ impl From for KillStorageResult {
pub trait Storage {
	/// Returns the data for `key` in the storage or `None` if the key can not be found.
	fn get(&self, key: &[u8]) -> Option {
-		self.storage(key).map(|s| bytes::Bytes::from(s.to_vec()))
+		self.storage(key).map(bytes::Bytes::from)
	}

	/// Get `key` from storage, placing the value into `value_out` and return the number of

From 8dc0b337889ef0d227c0a95681b340ee0e80a297 Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Wed, 24 Apr 2024 16:26:25 +0300
Subject: [PATCH 48/74] [BEEFY] Return valid signatures when verifying
 commitment (#4259)

Trying to split parts of https://github.com/paritytech/polkadot-sdk/pull/1903
into smaller PRs. For https://github.com/paritytech/polkadot-sdk/pull/1903 it
would help if `verify_with_validator_set()` returned the list of valid
authority-signature pairs, since after the verification we need to send them
in the equivocation proof.

---
 .../consensus/beefy/src/justification.rs      | 57 ++++++---------
 .../consensus/beefy/src/commitment.rs         | 70 +++++++++++++++++--
 .../primitives/consensus/beefy/src/lib.rs     |  2 +-
 3 files changed, 88 insertions(+), 41 deletions(-)

diff --git a/substrate/client/consensus/beefy/src/justification.rs b/substrate/client/consensus/beefy/src/justification.rs
index 7f1b9e5237c3..886368c9d7cb 100644
--- a/substrate/client/consensus/beefy/src/justification.rs
+++ b/substrate/client/consensus/beefy/src/justification.rs
@@ -16,12 +16,11 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see .
-use crate::keystore::BeefyKeystore;
-use codec::{DecodeAll, Encode};
+use codec::DecodeAll;
 use sp_consensus::Error as ConsensusError;
 use sp_consensus_beefy::{
 	ecdsa_crypto::{AuthorityId, Signature},
-	ValidatorSet, ValidatorSetId, VersionedFinalityProof,
+	BeefySignatureHasher, KnownSignature, ValidatorSet, ValidatorSetId, VersionedFinalityProof,
 };
 use sp_runtime::traits::{Block as BlockT, NumberFor};

@@ -45,46 +44,31 @@ pub(crate) fn decode_and_verify_finality_proof<Block: BlockT>(
 ) -> Result<BeefyVersionedFinalityProof<Block>, (ConsensusError, u32)> {
 	let proof = <BeefyVersionedFinalityProof<Block>>::decode_all(&mut &*encoded)
 		.map_err(|_| (ConsensusError::InvalidJustification, 0))?;
-	verify_with_validator_set::<Block>(target_number, validator_set, &proof).map(|_| proof)
+	verify_with_validator_set::<Block>(target_number, validator_set, &proof)?;
+	Ok(proof)
 }

 /// Verify the Beefy finality proof against the validator set at the block it was generated.
-pub(crate) fn verify_with_validator_set<Block: BlockT>(
+pub(crate) fn verify_with_validator_set<'a, Block: BlockT>(
 	target_number: NumberFor<Block>,
-	validator_set: &ValidatorSet<AuthorityId>,
-	proof: &BeefyVersionedFinalityProof<Block>,
-) -> Result<(), (ConsensusError, u32)> {
-	let mut signatures_checked = 0u32;
+	validator_set: &'a ValidatorSet<AuthorityId>,
+	proof: &'a BeefyVersionedFinalityProof<Block>,
+) -> Result<Vec<KnownSignature<&'a AuthorityId, &'a Signature>>, (ConsensusError, u32)> {
 	match proof {
 		VersionedFinalityProof::V1(signed_commitment) => {
-			if signed_commitment.signatures.len() != validator_set.len() ||
-				signed_commitment.commitment.validator_set_id != validator_set.id() ||
-				signed_commitment.commitment.block_number != target_number
-			{
-				return Err((ConsensusError::InvalidJustification, 0))
-			}
-
-			// Arrangement of signatures in the commitment should be in the same order
-			// as validators for that set.
-			let message = signed_commitment.commitment.encode();
-			let valid_signatures = validator_set
-				.validators()
-				.into_iter()
-				.zip(signed_commitment.signatures.iter())
-				.filter(|(id, signature)| {
-					signature
-						.as_ref()
-						.map(|sig| {
-							signatures_checked += 1;
-							BeefyKeystore::verify(*id, sig, &message[..])
-						})
-						.unwrap_or(false)
-				})
-				.count();
-			if valid_signatures >= crate::round::threshold(validator_set.len()) {
-				Ok(())
+			let signatories = signed_commitment
+				.verify_signatures::<_, BeefySignatureHasher>(target_number, validator_set)
+				.map_err(|checked_signatures| {
+					(ConsensusError::InvalidJustification, checked_signatures)
+				})?;
+
+			if signatories.len() >= crate::round::threshold(validator_set.len()) {
+				Ok(signatories)
 			} else {
-				Err((ConsensusError::InvalidJustification, signatures_checked))
+				Err((
+					ConsensusError::InvalidJustification,
+					signed_commitment.signature_count() as u32,
+				))
 			}
 		},
 	}
@@ -92,6 +76,7 @@ pub(crate) fn verify_with_validator_set<Block: BlockT>(

 #[cfg(test)]
 pub(crate) mod tests {
+	use codec::Encode;
 	use sp_consensus_beefy::{
 		known_payloads, test_utils::Keyring, Commitment, Payload, SignedCommitment,
 		VersionedFinalityProof,

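For illustration, a rough sketch of how a same-crate caller might consume the returned pairs (the helper name is hypothetical; `BeefyVersionedFinalityProof` is the crate-local alias used above, and `verify_with_validator_set` is `pub(crate)`):

```rust
use sp_consensus::Error as ConsensusError;
use sp_consensus_beefy::{
	ecdsa_crypto::{AuthorityId, Signature},
	KnownSignature, ValidatorSet,
};
use sp_runtime::traits::{Block as BlockT, NumberFor};

// Hypothetical helper: verify a finality proof and keep the valid
// authority-signature pairs in owned form, e.g. for a later equivocation report.
fn collect_valid_signatures<Block: BlockT>(
	target_number: NumberFor<Block>,
	validator_set: &ValidatorSet<AuthorityId>,
	proof: &BeefyVersionedFinalityProof<Block>,
) -> Result<Vec<KnownSignature<AuthorityId, Signature>>, (ConsensusError, u32)> {
	let valid = verify_with_validator_set::<Block>(target_number, validator_set, proof)?;
	// The returned entries borrow from `proof`; clone them into owned pairs.
	Ok(valid.iter().map(|pair| pair.to_owned()).collect())
}
```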
diff --git a/substrate/primitives/consensus/beefy/src/commitment.rs b/substrate/primitives/consensus/beefy/src/commitment.rs
index 4fd9e1b0a6ed..8d3a6c6aa90f 100644
--- a/substrate/primitives/consensus/beefy/src/commitment.rs
+++ b/substrate/primitives/consensus/beefy/src/commitment.rs
@@ -19,8 +19,30 @@ use alloc::{vec, vec::Vec};
 use codec::{Decode, Encode, Error, Input};
 use core::cmp;
 use scale_info::TypeInfo;
+use sp_application_crypto::RuntimeAppPublic;
+use sp_runtime::traits::Hash;
+
+use crate::{BeefyAuthorityId, Payload, ValidatorSet, ValidatorSetId};
+
+/// A commitment signature, accompanied by the id of the validator that it belongs to.
+#[derive(Debug)]
+pub struct KnownSignature<TAuthorityId, TSignature> {
+	/// The signing validator.
+	pub validator_id: TAuthorityId,
+	/// The signature.
+	pub signature: TSignature,
+}
+
-use crate::{Payload, ValidatorSetId};
+impl<TAuthorityId: Clone, TSignature: Clone> KnownSignature<&TAuthorityId, &TSignature> {
+	/// Creates a `KnownSignature<TAuthorityId, TSignature>` from a
+	/// `KnownSignature<&TAuthorityId, &TSignature>`.
+	pub fn to_owned(&self) -> KnownSignature<TAuthorityId, TSignature> {
+		KnownSignature {
+			validator_id: self.validator_id.clone(),
+			signature: self.signature.clone(),
+		}
+	}
+}

 /// A commitment signed by GRANDPA validators as part of BEEFY protocol.
 ///
@@ -113,9 +135,49 @@ impl core::fmt::Display
 impl<TBlockNumber, TSignature> SignedCommitment<TBlockNumber, TSignature> {
 	/// Return the number of collected signatures.
-	pub fn no_of_signatures(&self) -> usize {
+	pub fn signature_count(&self) -> usize {
 		self.signatures.iter().filter(|x| x.is_some()).count()
 	}
+
+	/// Verify all the commitment signatures against the validator set that was active
+	/// at the block where the commitment was generated.
+	///
+	/// Returns the valid validator-signature pairs if the commitment can be verified.
+	pub fn verify_signatures<'a, TAuthorityId, MsgHash>(
+		&'a self,
+		target_number: TBlockNumber,
+		validator_set: &'a ValidatorSet<TAuthorityId>,
+	) -> Result<Vec<KnownSignature<&'a TAuthorityId, &'a TSignature>>, u32>
+	where
+		TBlockNumber: Clone + Encode + PartialEq,
+		TAuthorityId: RuntimeAppPublic<Signature = TSignature> + BeefyAuthorityId<MsgHash>,
+		MsgHash: Hash,
+	{
+		if self.signatures.len() != validator_set.len() ||
+			self.commitment.validator_set_id != validator_set.id() ||
+			self.commitment.block_number != target_number
+		{
+			return Err(0)
+		}
+
+		// Arrangement of signatures in the commitment should be in the same order
+		// as validators for that set.
+		let encoded_commitment = self.commitment.encode();
+		let signatories: Vec<_> = validator_set
+			.validators()
+			.into_iter()
+			.zip(self.signatures.iter())
+			.filter_map(|(id, maybe_signature)| {
+				let signature = maybe_signature.as_ref()?;
+				match BeefyAuthorityId::verify(id, signature, &encoded_commitment) {
+					true => Some(KnownSignature { validator_id: id, signature }),
+					false => None,
+				}
+			})
+			.collect();
+
+		Ok(signatories)
+	}
 }

 /// Type to be used to denote placement of signatures
@@ -439,13 +501,13 @@ mod tests {
 			commitment,
 			signatures: vec![None, None, Some(sigs.0), Some(sigs.1)],
 		};
-		assert_eq!(signed.no_of_signatures(), 2);
+		assert_eq!(signed.signature_count(), 2);

 		// when
 		signed.signatures[2] = None;

 		// then
-		assert_eq!(signed.no_of_signatures(), 1);
+		assert_eq!(signed.signature_count(), 1);
 	}

 	#[test]

diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index 70978ca559dd..6f644c5f790d 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -43,7 +43,7 @@ pub mod witness;
 #[cfg(feature = "std")]
 pub mod test_utils;

-pub use commitment::{Commitment, SignedCommitment, VersionedFinalityProof};
+pub use commitment::{Commitment, KnownSignature, SignedCommitment, VersionedFinalityProof};
 pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider};

 use alloc::vec::Vec;

From ac473cfa7b32d9a2ae9c2445de772a9cc4c460a1 Mon Sep 17 00:00:00 2001
From: Branislav Kontur
Date: Wed, 24 Apr 2024 16:23:23 +0200
Subject: [PATCH 49/74] `AllowHrmpNotificationsFromRelayChain` barrier for HRMP
 notifications from the relaychain (#4156)

This PR:
- introduces the `AllowHrmpNotificationsFromRelayChain` barrier, allowing
HRMP notifications only from the relay chain (to fulfill safety
assumptions -
[see](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm/src/v4/mod.rs#L532)) - sets it up for all testnet SP parachains Continuation of: https://github.com/paritytech/polkadot-sdk/pull/3696 --- .../assets/asset-hub-rococo/src/xcm_config.rs | 24 ++- .../asset-hub-westend/src/xcm_config.rs | 10 +- .../bridge-hub-rococo/src/xcm_config.rs | 15 +- .../bridge-hub-westend/src/xcm_config.rs | 18 +- .../collectives-westend/src/xcm_config.rs | 19 +- .../contracts-rococo/src/xcm_config.rs | 18 +- .../coretime-rococo/src/xcm_config.rs | 16 +- .../coretime-westend/src/xcm_config.rs | 16 +- .../people/people-rococo/src/xcm_config.rs | 16 +- .../people/people-westend/src/xcm_config.rs | 16 +- .../runtimes/testing/penpal/src/xcm_config.rs | 19 +- .../testing/rococo-parachain/src/lib.rs | 8 +- polkadot/runtime/common/src/lib.rs | 2 +- polkadot/xcm/xcm-builder/src/barriers.rs | 35 ++++ polkadot/xcm/xcm-builder/src/lib.rs | 9 +- .../xcm/xcm-builder/src/tests/barriers.rs | 194 +++++++++++++----- prdoc/pr_4156.prdoc | 13 ++ 17 files changed, 309 insertions(+), 139 deletions(-) create mode 100644 prdoc/pr_4156.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index dbf27fb39ac5..a73c1cc33ea0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -49,17 +49,17 @@ use testnet_parachains_constants::rococo::snowbridge::{ }; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, - IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignPaidRemoteExporter, SovereignSignedViaLocation, StartsWith, - StartsWithExplicitGlobalConsensus, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, + NetworkExportTableItem, NoChecking, NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignPaidRemoteExporter, + SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -285,6 +285,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. 
AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ed8a58af396c..d610bfd768cd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -45,10 +45,10 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::{AccountIdConversion, ConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeFamily, DescribePalletTerminal, EnsureXcmOrigin, - FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, + EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, GlobalConsensusParachainConvertsFor, HashedDescription, IsConcrete, LocalMint, NetworkExportTableItem, NoChecking, NonFungiblesAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, @@ -299,6 +299,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index f354ccce21fe..bd1445bee22c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -46,12 +46,13 @@ use testnet_parachains_constants::rococo::snowbridge::EthereumNetwork; use xcm::latest::prelude::*; use xcm_builder::{ deposit_or_burn_fee, AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, - AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, - FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeToAccount, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HandleFee, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeToAccount, }; use xcm_executor::{ traits::{FeeManager, FeeReason, FeeReason::Export, TransactAsset}, @@ -150,6 +151,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 31c37c8ffab6..f147cd9653fe 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -38,14 +38,14 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, - WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -139,6 +139,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index 4449284b8aa8..84697c3e3634 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -35,14 +35,15 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use westend_runtime_constants::xcm as xcm_constants; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, - IsConcrete, LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, LocatableAssetId, + OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, + SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, + SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -156,6 +157,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 9132b4e17602..ac15ac5b0f0f 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -38,14 +38,14 @@ use sp_runtime::traits::AccountIdConversion; use testnet_parachains_constants::rococo::currency::CENTS; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, - IsConcrete, NativeAsset, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, - XcmFeeToAccount, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, + FrameTransactionalProcessor, FungibleAdapter, IsConcrete, NativeAsset, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -149,6 +149,8 @@ pub type Barrier = TrailingSetTopicAsId< )>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs index 3e71730e015f..9095b5b1caaa 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/xcm_config.rs @@ -39,13 +39,13 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -156,6 +156,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs index fc7ecf1e61c3..defc57e2d7f5 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/xcm_config.rs @@ -39,13 +39,13 @@ use polkadot_runtime_common::xcm_sender::ExponentialPrice; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, IsConcrete, - NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FrameTransactionalProcessor, + FungibleAdapter, IsConcrete, NonFungibleAdapter, ParentAsSuperuser, ParentIsPreset, + RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -164,6 +164,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs index e4e4fa1b2c44..101d9a180e5f 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/xcm_config.rs @@ -36,13 +36,13 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, - HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -165,6 +165,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs index 590f23f6853f..0a903f915056 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/xcm_config.rs @@ -36,13 +36,13 @@ use polkadot_parachain_primitives::primitives::Sibling; use sp_runtime::traits::AccountIdConversion; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, DescribeTerminus, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, - HashedDescription, IsConcrete, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, - UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + DenyReserveTransferToRelayChain, DenyThenTry, DescribeTerminus, EnsureXcmOrigin, + FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, ParentAsSuperuser, + ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, + TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, XcmFeeToAccount, }; use xcm_executor::XcmExecutor; @@ -173,6 +173,8 @@ pub type Barrier = TrailingSetTopicAsId< AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. 
+ AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 6832e2f4f440..711041f6d6e2 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -43,14 +43,15 @@ use polkadot_runtime_common::{impls::ToAuthor, xcm_sender::ExponentialPrice}; use sp_runtime::traits::{AccountIdConversion, ConvertInto, Identity, TryConvertInto}; use xcm::latest::prelude::*; use xcm_builder::{ - AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, - AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, EnsureXcmOrigin, - FixedWeightBounds, FrameTransactionalProcessor, FungibleAdapter, FungiblesAdapter, IsConcrete, - LocalMint, NativeAsset, NoChecking, ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, - SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative, - SignedToAccountId32, SovereignSignedViaLocation, StartsWith, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, - XcmFeeManagerFromComponents, XcmFeeToAccount, + AccountId32Aliases, AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, + AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, + ConvertedConcreteId, EnsureXcmOrigin, FixedWeightBounds, FrameTransactionalProcessor, + FungibleAdapter, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, NoChecking, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, StartsWith, TakeWeightCredit, TrailingSetTopicAsId, + UsingComponents, WithComputedOrigin, WithUniqueTopic, XcmFeeManagerFromComponents, + XcmFeeToAccount, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -217,6 +218,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowTopLevelPaidExecutionFrom, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, ), UniversalLocation, ConstU32<8>, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 0ae93d1577ce..11da6adb8190 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -74,9 +74,9 @@ use parachains_common::{ AccountId, AssetIdForTrustBackedAssets, Signature, }; use xcm_builder::{ - AllowKnownQueryResponses, AllowSubscriptionsFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, - FrameTransactionalProcessor, FungiblesAdapter, LocalMint, TrailingSetTopicAsId, - WithUniqueTopic, + AllowHrmpNotificationsFromRelayChain, AllowKnownQueryResponses, AllowSubscriptionsFrom, + AsPrefixedGeneralIndex, ConvertedConcreteId, FrameTransactionalProcessor, FungiblesAdapter, + LocalMint, TrailingSetTopicAsId, WithUniqueTopic, }; use xcm_executor::traits::JustTry; @@ -444,6 +444,8 @@ pub type Barrier = TrailingSetTopicAsId<( AllowKnownQueryResponses, // Subscriptions for version tracking are OK. AllowSubscriptionsFrom, + // HRMP notifications from the relay chain are OK. + AllowHrmpNotificationsFromRelayChain, )>; parameter_types! 
{

diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs
index 65161764ccd7..60cc684149b4 100644
--- a/polkadot/runtime/common/src/lib.rs
+++ b/polkadot/runtime/common/src/lib.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <https://www.gnu.org/licenses/>.

-//! Common runtime code for Polkadot and Kusama.
+//! Common runtime code for the Relay Chain, e.g. Rococo, Westend, Polkadot, Kusama ...

 #![cfg_attr(not(feature = "std"), no_std)]

diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs
index c0b328f38e96..11e9122f9a12 100644
--- a/polkadot/xcm/xcm-builder/src/barriers.rs
+++ b/polkadot/xcm/xcm-builder/src/barriers.rs
@@ -399,6 +399,41 @@ impl<T: Contains<Location>> ShouldExecute for AllowSubscriptionsFrom<T> {
 	}
 }

+/// Allows execution for the Relay Chain origin (represented as `Location::parent()`) if it is just
+/// a straight `HrmpNewChannelOpenRequest`, `HrmpChannelAccepted`, or `HrmpChannelClosing`
+/// instruction.
+///
+/// Note: This barrier fulfills safety recommendations for the mentioned instructions - see their
+/// documentation.
+pub struct AllowHrmpNotificationsFromRelayChain;
+impl ShouldExecute for AllowHrmpNotificationsFromRelayChain {
+	fn should_execute<RuntimeCall>(
+		origin: &Location,
+		instructions: &mut [Instruction<RuntimeCall>],
+		_max_weight: Weight,
+		_properties: &mut Properties,
+	) -> Result<(), ProcessMessageError> {
+		log::trace!(
+			target: "xcm::barriers",
+			"AllowHrmpNotificationsFromRelayChain origin: {:?}, instructions: {:?}, max_weight: {:?}, properties: {:?}",
+			origin, instructions, _max_weight, _properties,
+		);
+		// accept only the Relay Chain
+		ensure!(matches!(origin.unpack(), (1, [])), ProcessMessageError::Unsupported);
+		// accept only HRMP notifications and nothing else
+		instructions
+			.matcher()
+			.assert_remaining_insts(1)?
+			.match_next_inst(|inst| match inst {
+				HrmpNewChannelOpenRequest { .. } |
+				HrmpChannelAccepted { .. } |
+				HrmpChannelClosing { .. } => Ok(()),
+				_ => Err(ProcessMessageError::BadFormat),
+			})?;
+		Ok(())
+	}
+}
+
 /// Deny executing the XCM if it matches any of the Deny filter regardless of anything else.
 /// If it passes the Deny, and matches one of the Allow cases then it is let through.
pub struct DenyThenTry(PhantomData, PhantomData) diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index cdc663a0cc9b..977da9a55de7 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -35,10 +35,11 @@ pub use asset_conversion::{ mod barriers; pub use barriers::{ - AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, AllowSubscriptionsFrom, - AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain, - DenyThenTry, IsChildSystemParachain, IsParentsOnly, IsSiblingSystemParachain, - RespectSuspension, TakeWeightCredit, TrailingSetTopicAsId, WithComputedOrigin, + AllowExplicitUnpaidExecutionFrom, AllowHrmpNotificationsFromRelayChain, + AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, + AllowUnpaidExecutionFrom, DenyReserveTransferToRelayChain, DenyThenTry, IsChildSystemParachain, + IsParentsOnly, IsSiblingSystemParachain, RespectSuspension, TakeWeightCredit, + TrailingSetTopicAsId, WithComputedOrigin, }; mod controller; diff --git a/polkadot/xcm/xcm-builder/src/tests/barriers.rs b/polkadot/xcm/xcm-builder/src/tests/barriers.rs index 6516263f57a0..665b5febc61f 100644 --- a/polkadot/xcm/xcm-builder/src/tests/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/tests/barriers.rs @@ -315,56 +315,150 @@ fn allow_subscriptions_from_should_work() { // allow only parent AllowSubsFrom::set(vec![Location::parent()]); - let valid_xcm_1 = Xcm::(vec![SubscribeVersion { - query_id: 42, - max_response_weight: Weight::from_parts(5000, 5000), - }]); - let valid_xcm_2 = Xcm::(vec![UnsubscribeVersion]); - let invalid_xcm_1 = Xcm::(vec![ - SetAppendix(Xcm(vec![])), - SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, - ]); - let invalid_xcm_2 = Xcm::(vec![ - SubscribeVersion { query_id: 42, max_response_weight: Weight::from_parts(5000, 5000) }, - SetTopic([0; 32]), - ]); + // closure for (xcm, origin) testing with `AllowSubscriptionsFrom` + let assert_should_execute = |mut xcm: Vec>, origin, expected_result| { + assert_eq!( + AllowSubscriptionsFrom::>::should_execute( + &origin, + &mut xcm, + Weight::from_parts(10, 10), + &mut props(Weight::zero()), + ), + expected_result + ); + }; + + // invalid origin + assert_should_execute( + vec![SubscribeVersion { + query_id: Default::default(), + max_response_weight: Default::default(), + }], + Parachain(1).into_location(), + Err(ProcessMessageError::Unsupported), + ); + assert_should_execute( + vec![UnsubscribeVersion], + Parachain(1).into_location(), + Err(ProcessMessageError::Unsupported), + ); - let test_data = vec![ - ( - valid_xcm_1.clone(), - Parachain(1).into_location(), - // not allowed origin - Err(ProcessMessageError::Unsupported), - ), - (valid_xcm_1, Location::parent(), Ok(())), - ( - valid_xcm_2.clone(), - Parachain(1).into_location(), - // not allowed origin - Err(ProcessMessageError::Unsupported), - ), - (valid_xcm_2, Location::parent(), Ok(())), - ( - invalid_xcm_1, - Location::parent(), - // invalid XCM - Err(ProcessMessageError::BadFormat), - ), - ( - invalid_xcm_2, - Location::parent(), - // invalid XCM - Err(ProcessMessageError::BadFormat), - ), - ]; - - for (mut message, origin, expected_result) in test_data { - let r = AllowSubscriptionsFrom::>::should_execute( - &origin, - message.inner_mut(), - Weight::from_parts(10, 10), - &mut props(Weight::zero()), + // invalid XCM (unexpected instruction before) + assert_should_execute( + vec![ + SetAppendix(Xcm(vec![])), + 
SubscribeVersion { + query_id: Default::default(), + max_response_weight: Default::default(), + }, + ], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + assert_should_execute( + vec![SetAppendix(Xcm(vec![])), UnsubscribeVersion], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + // invalid XCM (unexpected instruction after) + assert_should_execute( + vec![ + SubscribeVersion { + query_id: Default::default(), + max_response_weight: Default::default(), + }, + SetTopic([0; 32]), + ], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + assert_should_execute( + vec![UnsubscribeVersion, SetTopic([0; 32])], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + // invalid XCM (unexpected instruction) + assert_should_execute( + vec![SetAppendix(Xcm(vec![]))], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + + // ok + assert_should_execute( + vec![SubscribeVersion { + query_id: Default::default(), + max_response_weight: Default::default(), + }], + Location::parent(), + Ok(()), + ); + assert_should_execute(vec![UnsubscribeVersion], Location::parent(), Ok(())); +} + +#[test] +fn allow_hrmp_notifications_from_relay_chain_should_work() { + // closure for (xcm, origin) testing with `AllowHrmpNotificationsFromRelayChain` + let assert_should_execute = |mut xcm: Vec>, origin, expected_result| { + assert_eq!( + AllowHrmpNotificationsFromRelayChain::should_execute( + &origin, + &mut xcm, + Weight::from_parts(10, 10), + &mut props(Weight::zero()), + ), + expected_result ); - assert_eq!(r, expected_result, "Failed for origin: {origin:?} and message: {message:?}"); - } + }; + + // invalid origin + assert_should_execute( + vec![HrmpChannelAccepted { recipient: Default::default() }], + Location::new(1, [Parachain(1)]), + Err(ProcessMessageError::Unsupported), + ); + + // invalid XCM (unexpected instruction before) + assert_should_execute( + vec![SetAppendix(Xcm(vec![])), HrmpChannelAccepted { recipient: Default::default() }], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + // invalid XCM (unexpected instruction after) + assert_should_execute( + vec![HrmpChannelAccepted { recipient: Default::default() }, SetTopic([0; 32])], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + // invalid XCM (unexpected instruction) + assert_should_execute( + vec![SetAppendix(Xcm(vec![]))], + Location::parent(), + Err(ProcessMessageError::BadFormat), + ); + + // ok + assert_should_execute( + vec![HrmpChannelAccepted { recipient: Default::default() }], + Location::parent(), + Ok(()), + ); + assert_should_execute( + vec![HrmpNewChannelOpenRequest { + max_capacity: Default::default(), + sender: Default::default(), + max_message_size: Default::default(), + }], + Location::parent(), + Ok(()), + ); + assert_should_execute( + vec![HrmpChannelClosing { + recipient: Default::default(), + sender: Default::default(), + initiator: Default::default(), + }], + Location::parent(), + Ok(()), + ); } diff --git a/prdoc/pr_4156.prdoc b/prdoc/pr_4156.prdoc new file mode 100644 index 000000000000..fc09a4e0df44 --- /dev/null +++ b/prdoc/pr_4156.prdoc @@ -0,0 +1,13 @@ +title: "`AllowHrmpNotificationsFromRelayChain` barrier for HRMP notifications from the relaychain" + +doc: + - audience: Runtime Dev + description: | + A new barrier, `AllowHrmpNotificationsFromRelayChain`, has been added. + This barrier can be utilized to ensure that HRMP notifications originate solely from the Relay Chain. 
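For illustration, a minimal sketch of that behaviour (origin and instruction values are arbitrary):

```rust
use xcm::latest::prelude::*;
use xcm_builder::AllowHrmpNotificationsFromRelayChain;
use xcm_executor::traits::{Properties, ShouldExecute};

let mut properties = Properties { weight_credit: Weight::zero(), message_id: None };

// Accepted: a lone HRMP notification whose origin is the relay chain (the parent).
let mut notification: Vec<Instruction<()>> = vec![HrmpChannelAccepted { recipient: 2000 }];
assert!(AllowHrmpNotificationsFromRelayChain::should_execute(
	&Location::parent(),
	&mut notification,
	Weight::from_parts(10, 10),
	&mut properties,
)
.is_ok());

// Rejected: the same instruction coming from a sibling parachain.
assert!(AllowHrmpNotificationsFromRelayChain::should_execute(
	&Location::new(1, [Parachain(1)]),
	&mut notification,
	Weight::from_parts(10, 10),
	&mut properties,
)
.is_err());
```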
+      If your runtime relies on these notifications,
+      you can include it in the runtime's barrier type for `xcm_executor::Config`.
+
+crates:
+- name: staging-xcm-builder
+  bump: minor

From d29c3636fac2cccadd774d467a2b891daca4d283 Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Wed, 24 Apr 2024 16:54:07 +0200
Subject: [PATCH 50/74] Updated review-bot to obtain number from event (#4271)

It seems that `review-trigger` is not uploading the artifact that is used by
`review-bot`, so I changed the PR number to be obtained from the event that
triggered this action.

I also took the liberty of replacing `tibdex/github-app-token` with
`actions/create-github-app-token`, which is GitHub's official action.
---
 .github/workflows/review-bot.yml     | 14 +++++---------
 .github/workflows/review-trigger.yml | 17 ++---------------
 2 files changed, 7 insertions(+), 24 deletions(-)

diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml
index 5b036115b238..fb877357b232 100644
--- a/.github/workflows/review-bot.yml
+++ b/.github/workflows/review-bot.yml
@@ -11,22 +11,18 @@ jobs:
     runs-on: ubuntu-latest
     environment: master
     steps:
-      - name: Extract content of artifact
-        id: number
-        uses: Bullrich/extract-text-from-artifact@v1.0.0
-        with:
-          artifact-name: pr_number
       - name: Generate token
         id: app_token
-        uses: tibdex/github-app-token@v1
+        uses: actions/create-github-app-token@v1.9.3
         with:
-          app_id: ${{ secrets.REVIEW_APP_ID }}
-          private_key: ${{ secrets.REVIEW_APP_KEY }}
+          app-id: ${{ secrets.REVIEW_APP_ID }}
+          private-key: ${{ secrets.REVIEW_APP_KEY }}
       - name: "Evaluates PR reviews and assigns reviewers"
         uses: paritytech/review-bot@v2.4.0
         with:
           repo-token: ${{ steps.app_token.outputs.token }}
           team-token: ${{ steps.app_token.outputs.token }}
           checks-token: ${{ steps.app_token.outputs.token }}
-          pr-number: ${{ steps.number.outputs.content }}
+          # This is extracted from the triggering event
+          pr-number: ${{ github.event.workflow_run.pull_requests[0].number }}
           request-reviewers: true

diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml
index 7f7d9d362782..6437be161d34 100644
--- a/.github/workflows/review-trigger.yml
+++ b/.github/workflows/review-trigger.yml
@@ -45,7 +45,7 @@ jobs:
           # We request them to review again
           echo $REVIEWERS | gh api --method POST repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/requested_reviewers --input -
-
+
           echo "::error::Project needs to be reviewed again"
           exit 1
         env:
@@ -53,21 +53,8 @@ jobs:
       - name: Comment requirements
         # If the previous step failed and github-actions hasn't commented yet we comment instructions
         if: failure() && !contains(fromJson(steps.comments.outputs.bodies), 'Review required! Latest push from author must always be reviewed')
-        run: | 
+        run: |
          gh pr comment ${{ github.event.pull_request.number }} --repo ${{ github.repository }} --body "Review required!
Latest push from author must always be reviewed" env: GH_TOKEN: ${{ github.token }} COMMENTS: ${{ steps.comments.outputs.users }} - - name: Get PR number - env: - PR_NUMBER: ${{ github.event.pull_request.number }} - run: | - echo "Saving PR number: $PR_NUMBER" - mkdir -p ./pr - echo $PR_NUMBER > ./pr/pr_number - - uses: actions/upload-artifact@v3 - name: Save PR number - with: - name: pr_number - path: pr/ - retention-days: 5 From 4f3d43a0c4e75caf73c1034a85590f81a9ae3809 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 24 Apr 2024 17:49:33 +0200 Subject: [PATCH 51/74] Revert `execute_blob` and `send_blob` (#4266) Revert "pallet-xcm: Deprecate `execute` and `send` in favor of `execute_blob` and `send_blob` (#3749)" This reverts commit feee773d15d5237765b520b03854d46652181de5. --------- Co-authored-by: Adrian Catangiu Co-authored-by: Javier Bullrich --- Cargo.lock | 1 - .../emulated/chains/relays/westend/Cargo.toml | 1 + .../emulated/common/src/impls.rs | 6 +- .../assets/asset-hub-rococo/src/tests/send.rs | 8 +- .../assets/asset-hub-rococo/src/tests/swap.rs | 4 +- .../asset-hub-westend/src/tests/send.rs | 8 +- .../asset-hub-westend/src/tests/swap.rs | 4 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 7 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 18 +- .../bridges/bridge-hub-westend/Cargo.toml | 1 - .../bridge-hub-westend/src/tests/send_xcm.rs | 7 +- .../src/weights/pallet_xcm.rs | 112 ++++------ .../src/weights/pallet_xcm.rs | 30 --- .../src/weights/pallet_xcm.rs | 108 ++++----- .../src/weights/pallet_xcm.rs | 108 ++++----- .../src/weights/pallet_xcm.rs | 106 ++++----- .../coretime-rococo/src/weights/pallet_xcm.rs | 102 +++------ .../src/weights/pallet_xcm.rs | 102 +++------ .../people-rococo/src/weights/pallet_xcm.rs | 102 +++------ .../people-westend/src/weights/pallet_xcm.rs | 102 +++------ polkadot/runtime/rococo/src/impls.rs | 9 +- .../runtime/rococo/src/weights/pallet_xcm.rs | 110 ++++------ polkadot/runtime/westend/src/impls.rs | 9 +- .../runtime/westend/src/weights/pallet_xcm.rs | 108 ++++----- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 29 --- polkadot/xcm/pallet-xcm/src/lib.rs | 205 +++++------------- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 72 +++--- polkadot/xcm/src/lib.rs | 3 - polkadot/xcm/src/v4/mod.rs | 20 +- polkadot/xcm/xcm-builder/src/controller.rs | 39 ++-- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- .../src/parachain/contracts_config.rs | 14 +- .../frame/contracts/mock-network/src/tests.rs | 33 ++- substrate/frame/contracts/src/lib.rs | 3 - substrate/frame/contracts/src/wasm/runtime.rs | 54 ++++- substrate/frame/contracts/uapi/src/host.rs | 2 +- 36 files changed, 602 insertions(+), 1047 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62479cce2a0e..ad7729d4b30e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2222,7 +2222,6 @@ dependencies = [ "pallet-message-queue", "pallet-xcm", "parachains-common", - "parity-scale-codec", "rococo-westend-system-emulated-network", "sp-runtime", "staging-xcm", diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml index 12a3ad60e0e0..20aedb50e6a1 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -11,6 +11,7 @@ publish = false workspace = true [dependencies] + # Substrate sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } 
sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index c8a2f097abe9..8f2789eb2f3a 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -360,7 +360,7 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { recipient: $crate::impls::ParaId, call: $crate::impls::DoubleEncoded<()> ) { - use $crate::impls::{bx, Chain, RelayChain, Encode}; + use $crate::impls::{bx, Chain, RelayChain}; ::execute_with(|| { let root_origin = ::RuntimeOrigin::root(); @@ -368,10 +368,10 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send_blob( + $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, bx!(destination.into()), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); Self::assert_xcm_pallet_sent(); }); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 1d120f1dc4c7..364fbd0d439f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -75,10 +75,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 919e0080ba62..ec48e400ff54 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -372,10 +372,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( penpal_root, bx!(asset_hub_location), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index f218b539c387..eb0e985cc0ce 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -75,10 +75,10 @@ fn 
send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); @@ -159,10 +159,10 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { )]); PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 31f763be6370..f6b658098865 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -371,10 +371,10 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { penpal.clone(), ); - assert_ok!(::PolkadotXcm::send_blob( + assert_ok!(::PolkadotXcm::send( penpal_root, bx!(asset_hub_location), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); PenpalA::assert_xcm_pallet_sent(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 4bd041dc03f4..a1d871cdb618 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -14,7 +14,6 @@ // limitations under the License. 
use crate::tests::*; -use codec::Encode; #[test] fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable() { @@ -27,7 +26,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm::<()>(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: WestendId.into(), @@ -39,10 +38,10 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index e332eb5bfda7..d0c02e611349 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -82,7 +82,7 @@ fn create_agent() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let remote_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let remote_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -95,10 +95,10 @@ fn create_agent() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - remote_xcm.encode().try_into().unwrap(), + bx!(remote_xcm), )); type RuntimeEvent = ::RuntimeEvent; @@ -140,7 +140,7 @@ fn create_channel() { let create_agent_call = SnowbridgeControl::Control(ControlCall::CreateAgent {}); // Construct XCM to create an agent for para 1001 - let create_agent_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let create_agent_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -153,7 +153,7 @@ fn create_channel() { let create_channel_call = SnowbridgeControl::Control(ControlCall::CreateChannel { mode: OperatingMode::Normal }); // Construct XCM to create a channel for para 1001 - let create_channel_xcm = VersionedXcm::from(Xcm::<()>(vec![ + let create_channel_xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit: Unlimited, check_origin: None }, DescendOrigin(Parachain(origin_para).into()), Transact { @@ -166,16 +166,16 @@ fn create_channel() { // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin.clone(), bx!(destination.clone()), - create_agent_xcm.encode().try_into().unwrap(), + bx!(create_agent_xcm), )); - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - create_channel_xcm.encode().try_into().unwrap(), + bx!(create_channel_xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git 
a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 3aa2e2bcbe06..6aebf8862d62 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,7 +11,6 @@ publish = false workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.0" } # Substrate frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index f69747c17704..b01be5e8dc84 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -14,7 +14,6 @@ // limitations under the License. use crate::tests::*; -use codec::Encode; #[test] fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable() { @@ -27,7 +26,7 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable let remote_xcm = Xcm(vec![ClearOrigin]); - let xcm = VersionedXcm::from(Xcm::<()>(vec![ + let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, @@ -39,10 +38,10 @@ fn send_xcm_from_westend_relay_to_rococo_asset_hub_should_fail_on_not_applicable // Westend Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Westend::execute_with(|| { - assert_ok!(::XcmPallet::send_blob( + assert_ok!(::XcmPallet::send( sudo_origin, bx!(destination), - xcm.encode().try_into().unwrap(), + bx!(xcm), )); type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index e0e231d7da27..51b6543bae82 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_224_000 picoseconds. 
- Weight::from_parts(21_821_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 21_474_000 picoseconds. - Weight::from_parts(22_072_000, 0) + // Minimum execution time: 22_136_000 picoseconds. + Weight::from_parts(22_518_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 90_677_000 picoseconds. - Weight::from_parts(93_658_000, 0) + // Minimum execution time: 92_277_000 picoseconds. + Weight::from_parts(94_843_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -140,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `400` // Estimated: `6196` - // Minimum execution time: 116_767_000 picoseconds. - Weight::from_parts(118_843_000, 0) + // Minimum execution time: 120_110_000 picoseconds. + Weight::from_parts(122_968_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) @@ -170,8 +148,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `496` // Estimated: `6208` - // Minimum execution time: 137_983_000 picoseconds. - Weight::from_parts(141_396_000, 0) + // Minimum execution time: 143_116_000 picoseconds. + Weight::from_parts(147_355_000, 0) .saturating_add(Weight::from_parts(0, 6208)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -186,24 +164,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
- Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_232_000 picoseconds. - Weight::from_parts(6_507_000, 0) + // Minimum execution time: 6_517_000 picoseconds. + Weight::from_parts(6_756_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -213,8 +181,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_884_000 picoseconds. - Weight::from_parts(2_016_000, 0) + // Minimum execution time: 1_894_000 picoseconds. + Weight::from_parts(2_024_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -240,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 26_637_000 picoseconds. - Weight::from_parts(27_616_000, 0) + // Minimum execution time: 27_314_000 picoseconds. + Weight::from_parts(28_787_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -266,8 +234,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 28_668_000 picoseconds. - Weight::from_parts(29_413_000, 0) + // Minimum execution time: 29_840_000 picoseconds. + Weight::from_parts(30_589_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -278,8 +246,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_990_000 picoseconds. - Weight::from_parts(2_114_000, 0) + // Minimum execution time: 1_893_000 picoseconds. + Weight::from_parts(2_017_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -289,8 +257,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 18_856_000 picoseconds. - Weight::from_parts(19_430_000, 0) + // Minimum execution time: 19_211_000 picoseconds. + Weight::from_parts(19_552_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -301,8 +269,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 19_068_000 picoseconds. - Weight::from_parts(19_434_000, 0) + // Minimum execution time: 19_177_000 picoseconds. + Weight::from_parts(19_704_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -313,8 +281,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 21_055_000 picoseconds. - Weight::from_parts(21_379_000, 0) + // Minimum execution time: 20_449_000 picoseconds. 
+ Weight::from_parts(21_075_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -336,8 +304,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 25_736_000 picoseconds. - Weight::from_parts(26_423_000, 0) + // Minimum execution time: 26_578_000 picoseconds. + Weight::from_parts(27_545_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -348,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_853_000 picoseconds. - Weight::from_parts(12_215_000, 0) + // Minimum execution time: 11_646_000 picoseconds. + Weight::from_parts(11_944_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -359,8 +327,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 19_418_000 picoseconds. - Weight::from_parts(19_794_000, 0) + // Minimum execution time: 19_301_000 picoseconds. + Weight::from_parts(19_664_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -383,8 +351,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 34_719_000 picoseconds. - Weight::from_parts(35_260_000, 0) + // Minimum execution time: 35_715_000 picoseconds. + Weight::from_parts(36_915_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -397,8 +365,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `1588` - // Minimum execution time: 4_937_000 picoseconds. - Weight::from_parts(5_203_000, 0) + // Minimum execution time: 4_871_000 picoseconds. + Weight::from_parts(5_066_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -409,8 +377,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_064_000 picoseconds. - Weight::from_parts(26_497_000, 0) + // Minimum execution time: 25_150_000 picoseconds. + Weight::from_parts(26_119_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -421,8 +389,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 37_132_000 picoseconds. - Weight::from_parts(37_868_000, 0) + // Minimum execution time: 38_248_000 picoseconds. 
+ Weight::from_parts(39_122_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index a36c25f96043..be3d7661ab3c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -70,28 +70,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 21_164_000 picoseconds. - Weight::from_parts(21_656_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -184,14 +162,6 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(7_791_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_585_000 picoseconds. - Weight::from_parts(7_897_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index adfaa9ea2028..a732e1a57343 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 18_732_000 picoseconds. - Weight::from_parts(19_386_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_943_000 picoseconds. - Weight::from_parts(19_455_000, 0) + // Minimum execution time: 18_513_000 picoseconds. + Weight::from_parts(19_156_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_917_000 picoseconds. - Weight::from_parts(91_611_000, 0) + // Minimum execution time: 88_096_000 picoseconds. + Weight::from_parts(89_732_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 88_587_000 picoseconds. - Weight::from_parts(90_303_000, 0) + // Minimum execution time: 88_239_000 picoseconds. 
+ Weight::from_parts(89_729_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_856_000 picoseconds. - Weight::from_parts(6_202_000, 0) + // Minimum execution time: 5_955_000 picoseconds. + Weight::from_parts(6_266_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_797_000 picoseconds. - Weight::from_parts(1_970_000, 0) + // Minimum execution time: 1_868_000 picoseconds. + Weight::from_parts(1_961_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_479_000 picoseconds. - Weight::from_parts(25_058_000, 0) + // Minimum execution time: 24_388_000 picoseconds. + Weight::from_parts(25_072_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 27_282_000 picoseconds. - Weight::from_parts(27_924_000, 0) + // Minimum execution time: 26_762_000 picoseconds. + Weight::from_parts(27_631_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_801_000 picoseconds. - Weight::from_parts(1_988_000, 0) + // Minimum execution time: 1_856_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_509_000 picoseconds. - Weight::from_parts(16_939_000, 0) + // Minimum execution time: 17_718_000 picoseconds. 
+ Weight::from_parts(18_208_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_140_000 picoseconds. - Weight::from_parts(16_843_000, 0) + // Minimum execution time: 17_597_000 picoseconds. + Weight::from_parts(18_090_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_160_000 picoseconds. - Weight::from_parts(18_948_000, 0) + // Minimum execution time: 19_533_000 picoseconds. + Weight::from_parts(20_164_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 24_409_000 picoseconds. - Weight::from_parts(25_261_000, 0) + // Minimum execution time: 24_958_000 picoseconds. + Weight::from_parts(25_628_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_848_000 picoseconds. - Weight::from_parts(11_241_000, 0) + // Minimum execution time: 12_209_000 picoseconds. + Weight::from_parts(12_612_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_609_000 picoseconds. - Weight::from_parts(17_044_000, 0) + // Minimum execution time: 17_844_000 picoseconds. + Weight::from_parts(18_266_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 32_500_000 picoseconds. - Weight::from_parts(33_475_000, 0) + // Minimum execution time: 34_131_000 picoseconds. + Weight::from_parts(34_766_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -375,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_484_000 picoseconds. - Weight::from_parts(3_673_000, 0) + // Minimum execution time: 3_525_000 picoseconds. + Weight::from_parts(3_724_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 25_225_000 picoseconds. - Weight::from_parts(25_731_000, 0) + // Minimum execution time: 24_975_000 picoseconds. 
+ Weight::from_parts(25_517_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_961_000 picoseconds. - Weight::from_parts(34_818_000, 0) + // Minimum execution time: 33_761_000 picoseconds. + Weight::from_parts(34_674_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 9cf4c61466a1..a78ff2355efa 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 19_702_000 picoseconds. - Weight::from_parts(20_410_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 19_525_000 picoseconds. - Weight::from_parts(20_071_000, 0) + // Minimum execution time: 19_527_000 picoseconds. 
+ Weight::from_parts(19_839_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_793_000 picoseconds. - Weight::from_parts(93_761_000, 0) + // Minimum execution time: 90_938_000 picoseconds. + Weight::from_parts(92_822_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `107` // Estimated: `3593` - // Minimum execution time: 91_819_000 picoseconds. - Weight::from_parts(93_198_000, 0) + // Minimum execution time: 90_133_000 picoseconds. + Weight::from_parts(92_308_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_183_000 picoseconds. - Weight::from_parts(6_598_000, 0) + // Minimum execution time: 6_205_000 picoseconds. + Weight::from_parts(6_595_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. - Weight::from_parts(2_076_000, 0) + // Minimum execution time: 1_927_000 picoseconds. + Weight::from_parts(2_062_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_375_000 picoseconds. - Weight::from_parts(26_165_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_782_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 28_167_000 picoseconds. - Weight::from_parts(28_792_000, 0) + // Minimum execution time: 28_188_000 picoseconds. 
+ Weight::from_parts(28_826_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_039_000 picoseconds. - Weight::from_parts(2_211_000, 0) + // Minimum execution time: 1_886_000 picoseconds. + Weight::from_parts(1_991_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 17_127_000 picoseconds. - Weight::from_parts(17_519_000, 0) + // Minimum execution time: 17_443_000 picoseconds. + Weight::from_parts(17_964_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_701_000 picoseconds. - Weight::from_parts(17_250_000, 0) + // Minimum execution time: 17_357_000 picoseconds. + Weight::from_parts(18_006_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_795_000 picoseconds. - Weight::from_parts(19_302_000, 0) + // Minimum execution time: 18_838_000 picoseconds. + Weight::from_parts(19_688_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 25_007_000 picoseconds. - Weight::from_parts(25_786_000, 0) + // Minimum execution time: 25_517_000 picoseconds. + Weight::from_parts(26_131_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_534_000 picoseconds. - Weight::from_parts(11_798_000, 0) + // Minimum execution time: 11_587_000 picoseconds. + Weight::from_parts(11_963_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 17_357_000 picoseconds. - Weight::from_parts(17_629_000, 0) + // Minimum execution time: 17_490_000 picoseconds. + Weight::from_parts(18_160_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 33_487_000 picoseconds. - Weight::from_parts(34_033_000, 0) + // Minimum execution time: 34_088_000 picoseconds. 
+ Weight::from_parts(34_598_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -375,8 +343,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_688_000 picoseconds. - Weight::from_parts(3_854_000, 0) + // Minimum execution time: 3_566_000 picoseconds. + Weight::from_parts(3_754_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 26_336_000 picoseconds. - Weight::from_parts(26_873_000, 0) + // Minimum execution time: 25_078_000 picoseconds. + Weight::from_parts(25_477_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_633_000 picoseconds. - Weight::from_parts(35_171_000, 0) + // Minimum execution time: 34_661_000 picoseconds. + Weight::from_parts(35_411_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs index 0edd5dfff2b8..5d427d850046 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -64,30 +64,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 21_911_000 picoseconds. 
- Weight::from_parts(22_431_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 22_143_000 picoseconds. - Weight::from_parts(22_843_000, 0) + // Minimum execution time: 21_813_000 picoseconds. + Weight::from_parts(22_332_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -112,8 +90,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 96_273_000 picoseconds. - Weight::from_parts(98_351_000, 0) + // Minimum execution time: 93_243_000 picoseconds. + Weight::from_parts(95_650_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,8 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `214` // Estimated: `3679` - // Minimum execution time: 95_571_000 picoseconds. - Weight::from_parts(96_251_000, 0) + // Minimum execution time: 96_199_000 picoseconds. + Weight::from_parts(98_620_000, 0) .saturating_add(Weight::from_parts(0, 3679)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -164,24 +142,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_227_000 picoseconds. - Weight::from_parts(6_419_000, 0) + // Minimum execution time: 6_442_000 picoseconds. 
+ Weight::from_parts(6_682_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +159,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_851_000 picoseconds. - Weight::from_parts(1_940_000, 0) + // Minimum execution time: 1_833_000 picoseconds. + Weight::from_parts(1_973_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -218,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 27_449_000 picoseconds. - Weight::from_parts(28_513_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_224_000, 0) .saturating_add(Weight::from_parts(0, 3610)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) @@ -244,8 +212,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 29_477_000 picoseconds. - Weight::from_parts(30_251_000, 0) + // Minimum execution time: 29_070_000 picoseconds. + Weight::from_parts(30_205_000, 0) .saturating_add(Weight::from_parts(0, 3828)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) @@ -256,8 +224,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_894_000 picoseconds. - Weight::from_parts(2_009_000, 0) + // Minimum execution time: 1_904_000 picoseconds. + Weight::from_parts(2_033_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -267,8 +235,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `159` // Estimated: `13524` - // Minimum execution time: 17_991_000 picoseconds. - Weight::from_parts(18_651_000, 0) + // Minimum execution time: 18_348_000 picoseconds. + Weight::from_parts(18_853_000, 0) .saturating_add(Weight::from_parts(0, 13524)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -279,8 +247,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `163` // Estimated: `13528` - // Minimum execution time: 18_321_000 picoseconds. - Weight::from_parts(18_701_000, 0) + // Minimum execution time: 17_964_000 picoseconds. + Weight::from_parts(18_548_000, 0) .saturating_add(Weight::from_parts(0, 13528)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -291,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `16013` - // Minimum execution time: 19_762_000 picoseconds. - Weight::from_parts(20_529_000, 0) + // Minimum execution time: 19_708_000 picoseconds. + Weight::from_parts(20_157_000, 0) .saturating_add(Weight::from_parts(0, 16013)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -314,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 26_927_000 picoseconds. - Weight::from_parts(27_629_000, 0) + // Minimum execution time: 26_632_000 picoseconds. 
+ Weight::from_parts(27_314_000, 0) .saturating_add(Weight::from_parts(0, 6152)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) @@ -326,8 +294,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `11096` - // Minimum execution time: 11_957_000 picoseconds. - Weight::from_parts(12_119_000, 0) + // Minimum execution time: 11_929_000 picoseconds. + Weight::from_parts(12_304_000, 0) .saturating_add(Weight::from_parts(0, 11096)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -337,8 +305,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `170` // Estimated: `13535` - // Minimum execution time: 17_942_000 picoseconds. - Weight::from_parts(18_878_000, 0) + // Minimum execution time: 18_599_000 picoseconds. + Weight::from_parts(19_195_000, 0) .saturating_add(Weight::from_parts(0, 13535)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -361,8 +329,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `13577` - // Minimum execution time: 35_640_000 picoseconds. - Weight::from_parts(36_340_000, 0) + // Minimum execution time: 35_524_000 picoseconds. + Weight::from_parts(36_272_000, 0) .saturating_add(Weight::from_parts(0, 13577)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) @@ -376,7 +344,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `103` // Estimated: `1588` // Minimum execution time: 4_044_000 picoseconds. - Weight::from_parts(4_229_000, 0) + Weight::from_parts(4_238_000, 0) .saturating_add(Weight::from_parts(0, 1588)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -387,8 +355,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7740` // Estimated: `11205` - // Minimum execution time: 26_262_000 picoseconds. - Weight::from_parts(26_842_000, 0) + // Minimum execution time: 25_741_000 picoseconds. + Weight::from_parts(26_301_000, 0) .saturating_add(Weight::from_parts(0, 11205)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -399,8 +367,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 36_775_000 picoseconds. - Weight::from_parts(37_265_000, 0) + // Minimum execution time: 35_925_000 picoseconds. + Weight::from_parts(36_978_000, 0) .saturating_add(Weight::from_parts(0, 3625)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs index df0044089c8f..c5d315467c1e 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 18_767_000 picoseconds. - Weight::from_parts(19_420_000, 0) - .saturating_add(Weight::from_parts(0, 3539)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `74` - // Estimated: `3539` - // Minimum execution time: 19_184_000 picoseconds. - Weight::from_parts(19_695_000, 0) + // Minimum execution time: 35_051_000 picoseconds. + Weight::from_parts(35_200_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 58_120_000 picoseconds. - Weight::from_parts(59_533_000, 0) + // Minimum execution time: 56_235_000 picoseconds. + Weight::from_parts(58_178_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_074_000 picoseconds. - Weight::from_parts(6_398_000, 0) + // Minimum execution time: 6_226_000 picoseconds. 
+ Weight::from_parts(6_403_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_036_000 picoseconds. - Weight::from_parts(2_180_000, 0) + // Minimum execution time: 2_020_000 picoseconds. + Weight::from_parts(2_100_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 25_014_000 picoseconds. - Weight::from_parts(25_374_000, 0) + // Minimum execution time: 24_387_000 picoseconds. + Weight::from_parts(24_814_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 27_616_000 picoseconds. - Weight::from_parts(28_499_000, 0) + // Minimum execution time: 27_039_000 picoseconds. + Weight::from_parts(27_693_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_061_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 1_920_000 picoseconds. + Weight::from_parts(2_082_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_592_000 picoseconds. - Weight::from_parts(16_900_000, 0) + // Minimum execution time: 17_141_000 picoseconds. + Weight::from_parts(17_500_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_694_000 picoseconds. - Weight::from_parts(16_905_000, 0) + // Minimum execution time: 17_074_000 picoseconds. + Weight::from_parts(17_431_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_779_000 picoseconds. - Weight::from_parts(18_490_000, 0) + // Minimum execution time: 19_139_000 picoseconds. + Weight::from_parts(19_474_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 24_526_000 picoseconds. - Weight::from_parts(25_182_000, 0) + // Minimum execution time: 24_346_000 picoseconds. 
+ Weight::from_parts(25_318_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_467_000 picoseconds. - Weight::from_parts(10_934_000, 0) + // Minimum execution time: 11_777_000 picoseconds. + Weight::from_parts(12_051_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_377_000 picoseconds. - Weight::from_parts(17_114_000, 0) + // Minimum execution time: 17_538_000 picoseconds. + Weight::from_parts(17_832_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 32_575_000 picoseconds. - Weight::from_parts(33_483_000, 0) + // Minimum execution time: 33_623_000 picoseconds. + Weight::from_parts(34_186_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_604_000 picoseconds. - Weight::from_parts(3_744_000, 0) + // Minimum execution time: 3_363_000 picoseconds. + Weight::from_parts(3_511_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_983_000 picoseconds. - Weight::from_parts(24_404_000, 0) + // Minimum execution time: 23_969_000 picoseconds. + Weight::from_parts(24_347_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 34_446_000 picoseconds. - Weight::from_parts(35_465_000, 0) + // Minimum execution time: 34_071_000 picoseconds. + Weight::from_parts(35_031_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs index a1701c5f1c2c..0082db3099d0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 17_681_000 picoseconds. - Weight::from_parts(18_350_000, 0) - .saturating_add(Weight::from_parts(0, 3539)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `74` - // Estimated: `3539` - // Minimum execution time: 18_091_000 picoseconds. - Weight::from_parts(18_327_000, 0) + // Minimum execution time: 18_410_000 picoseconds. + Weight::from_parts(18_657_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 54_943_000 picoseconds. - Weight::from_parts(56_519_000, 0) + // Minimum execution time: 56_616_000 picoseconds. + Weight::from_parts(57_751_000, 0) .saturating_add(Weight::from_parts(0, 3571)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_887_000 picoseconds. - Weight::from_parts(6_101_000, 0) + // Minimum execution time: 6_014_000 picoseconds. 
+ Weight::from_parts(6_412_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_940_000 picoseconds. - Weight::from_parts(2_022_000, 0) + // Minimum execution time: 1_844_000 picoseconds. + Weight::from_parts(1_957_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 23_165_000 picoseconds. - Weight::from_parts(23_800_000, 0) + // Minimum execution time: 24_067_000 picoseconds. + Weight::from_parts(24_553_000, 0) .saturating_add(Weight::from_parts(0, 3539)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 26_506_000 picoseconds. - Weight::from_parts(27_180_000, 0) + // Minimum execution time: 27_023_000 picoseconds. + Weight::from_parts(27_620_000, 0) .saturating_add(Weight::from_parts(0, 3757)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_002_000, 0) + // Minimum execution time: 1_866_000 picoseconds. + Weight::from_parts(1_984_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_138_000 picoseconds. - Weight::from_parts(16_447_000, 0) + // Minimum execution time: 16_425_000 picoseconds. + Weight::from_parts(16_680_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_099_000 picoseconds. - Weight::from_parts(16_592_000, 0) + // Minimum execution time: 16_171_000 picoseconds. + Weight::from_parts(16_564_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 17_972_000 picoseconds. - Weight::from_parts(18_379_000, 0) + // Minimum execution time: 17_785_000 picoseconds. + Weight::from_parts(18_123_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 23_554_000 picoseconds. - Weight::from_parts(24_446_000, 0) + // Minimum execution time: 23_903_000 picoseconds. 
+ Weight::from_parts(24_769_000, 0) .saturating_add(Weight::from_parts(0, 6082)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_541_000 picoseconds. - Weight::from_parts(10_894_000, 0) + // Minimum execution time: 10_617_000 picoseconds. + Weight::from_parts(10_843_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_404_000 picoseconds. - Weight::from_parts(16_818_000, 0) + // Minimum execution time: 16_656_000 picoseconds. + Weight::from_parts(17_106_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `13507` - // Minimum execution time: 31_617_000 picoseconds. - Weight::from_parts(32_336_000, 0) + // Minimum execution time: 31_721_000 picoseconds. + Weight::from_parts(32_547_000, 0) .saturating_add(Weight::from_parts(0, 13507)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_328_000 picoseconds. - Weight::from_parts(3_501_000, 0) + // Minimum execution time: 3_439_000 picoseconds. + Weight::from_parts(3_619_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 23_571_000 picoseconds. - Weight::from_parts(24_312_000, 0) + // Minimum execution time: 24_657_000 picoseconds. + Weight::from_parts(24_971_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 32_879_000 picoseconds. - Weight::from_parts(33_385_000, 0) + // Minimum execution time: 34_028_000 picoseconds. + Weight::from_parts(34_697_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs index ac494fdc719f..fabce29b5fd9 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_935_000 picoseconds. - Weight::from_parts(18_482_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_311_000 picoseconds. - Weight::from_parts(18_850_000, 0) + // Minimum execution time: 17_830_000 picoseconds. + Weight::from_parts(18_411_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 56_182_000 picoseconds. - Weight::from_parts(58_136_000, 0) + // Minimum execution time: 55_456_000 picoseconds. + Weight::from_parts(56_808_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_979_000 picoseconds. - Weight::from_parts(6_289_000, 0) + // Minimum execution time: 5_996_000 picoseconds. 
+ Weight::from_parts(6_154_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_853_000 picoseconds. - Weight::from_parts(2_045_000, 0) + // Minimum execution time: 1_768_000 picoseconds. + Weight::from_parts(1_914_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_827_000 picoseconds. - Weight::from_parts(24_493_000, 0) + // Minimum execution time: 24_120_000 picoseconds. + Weight::from_parts(24_745_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_755_000 picoseconds. - Weight::from_parts(27_125_000, 0) + // Minimum execution time: 26_630_000 picoseconds. + Weight::from_parts(27_289_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_898_000 picoseconds. - Weight::from_parts(2_028_000, 0) + // Minimum execution time: 1_821_000 picoseconds. + Weight::from_parts(1_946_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_300_000 picoseconds. - Weight::from_parts(16_995_000, 0) + // Minimum execution time: 16_586_000 picoseconds. + Weight::from_parts(16_977_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_495_000 picoseconds. - Weight::from_parts(16_950_000, 0) + // Minimum execution time: 16_923_000 picoseconds. + Weight::from_parts(17_415_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_153_000 picoseconds. - Weight::from_parts(18_595_000, 0) + // Minimum execution time: 18_596_000 picoseconds. + Weight::from_parts(18_823_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_387_000 picoseconds. - Weight::from_parts(24_677_000, 0) + // Minimum execution time: 23_817_000 picoseconds. 
+ Weight::from_parts(24_520_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 10_939_000 picoseconds. - Weight::from_parts(11_210_000, 0) + // Minimum execution time: 11_042_000 picoseconds. + Weight::from_parts(11_578_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_850_000 picoseconds. - Weight::from_parts(17_195_000, 0) + // Minimum execution time: 17_306_000 picoseconds. + Weight::from_parts(17_817_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_931_000 picoseconds. - Weight::from_parts(32_494_000, 0) + // Minimum execution time: 32_141_000 picoseconds. + Weight::from_parts(32_954_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_514_000 picoseconds. - Weight::from_parts(3_709_000, 0) + // Minimum execution time: 3_410_000 picoseconds. + Weight::from_parts(3_556_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_863_000 picoseconds. - Weight::from_parts(25_293_000, 0) + // Minimum execution time: 25_021_000 picoseconds. + Weight::from_parts(25_240_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 33_799_000 picoseconds. - Weight::from_parts(34_665_000, 0) + // Minimum execution time: 33_801_000 picoseconds. + Weight::from_parts(34_655_000, 0) .saturating_add(Weight::from_parts(0, 3555)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs index 62a9c802808c..c337289243b7 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("people-westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -62,28 +62,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 17_450_000 picoseconds. - Weight::from_parts(17_913_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 18_082_000 picoseconds. - Weight::from_parts(18_293_000, 0) + // Minimum execution time: 17_856_000 picoseconds. + Weight::from_parts(18_473_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -104,8 +84,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 54_939_000 picoseconds. - Weight::from_parts(55_721_000, 0) + // Minimum execution time: 56_112_000 picoseconds. + Weight::from_parts(57_287_000, 0) .saturating_add(Weight::from_parts(0, 3535)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) @@ -140,24 +120,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_789_000 picoseconds. - Weight::from_parts(5_995_000, 0) + // Minimum execution time: 6_186_000 picoseconds. 
+ Weight::from_parts(6_420_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -167,8 +137,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_795_000 picoseconds. - Weight::from_parts(1_924_000, 0) + // Minimum execution time: 1_824_000 picoseconds. + Weight::from_parts(1_999_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -192,8 +162,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_445_000 picoseconds. - Weight::from_parts(23_906_000, 0) + // Minimum execution time: 23_833_000 picoseconds. + Weight::from_parts(24_636_000, 0) .saturating_add(Weight::from_parts(0, 3503)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(5)) @@ -216,8 +186,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 26_590_000 picoseconds. - Weight::from_parts(27_056_000, 0) + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(27_275_000, 0) .saturating_add(Weight::from_parts(0, 3720)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -228,8 +198,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_889_000 picoseconds. - Weight::from_parts(1_962_000, 0) + // Minimum execution time: 1_921_000 picoseconds. + Weight::from_parts(2_040_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -239,8 +209,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `89` // Estimated: `13454` - // Minimum execution time: 16_408_000 picoseconds. - Weight::from_parts(16_877_000, 0) + // Minimum execution time: 16_832_000 picoseconds. + Weight::from_parts(17_312_000, 0) .saturating_add(Weight::from_parts(0, 13454)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -251,8 +221,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `93` // Estimated: `13458` - // Minimum execution time: 16_791_000 picoseconds. - Weight::from_parts(17_111_000, 0) + // Minimum execution time: 16_687_000 picoseconds. + Weight::from_parts(17_123_000, 0) .saturating_add(Weight::from_parts(0, 13458)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -263,8 +233,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `15946` - // Minimum execution time: 18_355_000 picoseconds. - Weight::from_parts(19_110_000, 0) + // Minimum execution time: 18_164_000 picoseconds. + Weight::from_parts(18_580_000, 0) .saturating_add(Weight::from_parts(0, 15946)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -284,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 23_354_000 picoseconds. - Weight::from_parts(23_999_000, 0) + // Minimum execution time: 23_577_000 picoseconds. 
+ Weight::from_parts(24_324_000, 0) .saturating_add(Weight::from_parts(0, 6046)) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) @@ -296,8 +266,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `11026` - // Minimum execution time: 11_065_000 picoseconds. - Weight::from_parts(11_302_000, 0) + // Minimum execution time: 11_014_000 picoseconds. + Weight::from_parts(11_223_000, 0) .saturating_add(Weight::from_parts(0, 11026)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -307,8 +277,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `100` // Estimated: `13465` - // Minimum execution time: 16_998_000 picoseconds. - Weight::from_parts(17_509_000, 0) + // Minimum execution time: 16_887_000 picoseconds. + Weight::from_parts(17_361_000, 0) .saturating_add(Weight::from_parts(0, 13465)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -329,8 +299,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 31_068_000 picoseconds. - Weight::from_parts(31_978_000, 0) + // Minimum execution time: 31_705_000 picoseconds. + Weight::from_parts(32_166_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) @@ -343,8 +313,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1517` - // Minimum execution time: 3_478_000 picoseconds. - Weight::from_parts(3_595_000, 0) + // Minimum execution time: 3_568_000 picoseconds. + Weight::from_parts(3_669_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -355,8 +325,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7669` // Estimated: `11134` - // Minimum execution time: 24_962_000 picoseconds. - Weight::from_parts(25_404_000, 0) + // Minimum execution time: 24_823_000 picoseconds. + Weight::from_parts(25_344_000, 0) .saturating_add(Weight::from_parts(0, 11134)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -367,8 +337,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 32_685_000 picoseconds. - Weight::from_parts(33_592_000, 0) + // Minimum execution time: 34_516_000 picoseconds. 
+			Weight::from_parts(35_478_000, 0)
 			.saturating_add(Weight::from_parts(0, 3555))
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs
index cf364b6ac794..ac7100d78583 100644
--- a/polkadot/runtime/rococo/src/impls.rs
+++ b/polkadot/runtime/rococo/src/impls.rs
@@ -167,16 +167,11 @@ where
 			},
 		]);
 
-		let encoded_versioned_xcm =
-			VersionedXcm::V4(program).encode().try_into().map_err(|error| {
-				log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error);
-				pallet_xcm::Error::<Runtime>::XcmTooLarge
-			})?;
 		// send
-		let _ = <pallet_xcm::Pallet<Runtime>>::send_blob(
+		let _ = <pallet_xcm::Pallet<Runtime>>::send(
 			RawOrigin::Root.into(),
 			Box::new(VersionedLocation::V4(destination)),
-			encoded_versioned_xcm,
+			Box::new(VersionedXcm::V4(program)),
 		)?;
 		Ok(())
 	}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
index 42972baa1c83..5544ca44658c 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
 
 // Executed Command:
@@ -60,26 +60,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `180`
 		//  Estimated: `3645`
-		// Minimum execution time: 24_724_000 picoseconds.
-		Weight::from_parts(25_615_000, 0)
-			.saturating_add(Weight::from_parts(0, 3645))
-			.saturating_add(T::DbWeight::get().reads(4))
-			.saturating_add(T::DbWeight::get().writes(2))
-	}
-	/// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0)
-	/// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `XcmPallet::SupportedVersion` (r:1 w:0)
-	/// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1)
-	/// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	/// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1)
-	/// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`)
-	fn send_blob() -> Weight {
-		// Proof Size summary in bytes:
-		//  Measured:  `180`
-		//  Estimated: `3645`
-		// Minimum execution time: 24_709_000 picoseconds.
-		Weight::from_parts(25_326_000, 0)
+		// Minimum execution time: 25_043_000 picoseconds.
+		Weight::from_parts(25_682_000, 0)
 			.saturating_add(Weight::from_parts(0, 3645))
 			.saturating_add(T::DbWeight::get().reads(4))
 			.saturating_add(T::DbWeight::get().writes(2))
@@ -98,8 +80,8 @@ impl<T: frame_system::Config> pallet_xcm::WeightInfo for WeightInfo<T> {
 		// Proof Size summary in bytes:
 		//  Measured:  `180`
 		//  Estimated: `3645`
-		// Minimum execution time: 106_600_000 picoseconds.
-		Weight::from_parts(110_781_000, 0)
+		// Minimum execution time: 107_570_000 picoseconds.
+ Weight::from_parts(109_878_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -118,8 +100,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `232` // Estimated: `3697` - // Minimum execution time: 103_030_000 picoseconds. - Weight::from_parts(106_018_000, 0) + // Minimum execution time: 106_341_000 picoseconds. + Weight::from_parts(109_135_000, 0) .saturating_add(Weight::from_parts(0, 3697)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -138,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 107_017_000 picoseconds. - Weight::from_parts(109_214_000, 0) + // Minimum execution time: 108_372_000 picoseconds. + Weight::from_parts(112_890_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) @@ -148,16 +130,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_864_000 picoseconds. - Weight::from_parts(7_135_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_955_000 picoseconds. - Weight::from_parts(7_165_000, 0) + // Minimum execution time: 6_957_000 picoseconds. + Weight::from_parts(7_417_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) @@ -166,8 +140,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_827_000 picoseconds. - Weight::from_parts(7_211_000, 0) + // Minimum execution time: 7_053_000 picoseconds. + Weight::from_parts(7_462_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -175,8 +149,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_788_000 picoseconds. - Weight::from_parts(2_021_000, 0) + // Minimum execution time: 1_918_000 picoseconds. + Weight::from_parts(2_037_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -197,8 +171,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `180` // Estimated: `3645` - // Minimum execution time: 30_627_000 picoseconds. - Weight::from_parts(31_350_000, 0) + // Minimum execution time: 30_417_000 picoseconds. + Weight::from_parts(31_191_000, 0) .saturating_add(Weight::from_parts(0, 3645)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -219,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `360` // Estimated: `3825` - // Minimum execution time: 36_688_000 picoseconds. - Weight::from_parts(37_345_000, 0) + // Minimum execution time: 36_666_000 picoseconds. 
+ Weight::from_parts(37_779_000, 0) .saturating_add(Weight::from_parts(0, 3825)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -231,8 +205,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_829_000 picoseconds. - Weight::from_parts(1_986_000, 0) + // Minimum execution time: 1_869_000 picoseconds. + Weight::from_parts(2_003_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -242,8 +216,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_104_000 picoseconds. - Weight::from_parts(16_464_000, 0) + // Minimum execution time: 16_188_000 picoseconds. + Weight::from_parts(16_435_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -254,8 +228,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_267_000 picoseconds. - Weight::from_parts(16_675_000, 0) + // Minimum execution time: 16_431_000 picoseconds. + Weight::from_parts(16_935_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -266,8 +240,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 18_487_000 picoseconds. - Weight::from_parts(19_102_000, 0) + // Minimum execution time: 18_460_000 picoseconds. + Weight::from_parts(18_885_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -285,8 +259,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `6156` - // Minimum execution time: 29_603_000 picoseconds. - Weight::from_parts(31_002_000, 0) + // Minimum execution time: 29_623_000 picoseconds. + Weight::from_parts(30_661_000, 0) .saturating_add(Weight::from_parts(0, 6156)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -297,8 +271,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 12_183_000 picoseconds. - Weight::from_parts(12_587_000, 0) + // Minimum execution time: 12_043_000 picoseconds. + Weight::from_parts(12_360_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -308,8 +282,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_372_000 picoseconds. - Weight::from_parts(16_967_000, 0) + // Minimum execution time: 16_511_000 picoseconds. + Weight::from_parts(17_011_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -328,8 +302,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `216` // Estimated: `13581` - // Minimum execution time: 38_904_000 picoseconds. - Weight::from_parts(39_983_000, 0) + // Minimum execution time: 39_041_000 picoseconds. 
+ Weight::from_parts(39_883_000, 0) .saturating_add(Weight::from_parts(0, 13581)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -342,8 +316,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_067_000 picoseconds. - Weight::from_parts(2_195_000, 0) + // Minimum execution time: 2_030_000 picoseconds. + Weight::from_parts(2_150_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -354,8 +328,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 23_982_000 picoseconds. - Weight::from_parts(24_409_000, 0) + // Minimum execution time: 22_615_000 picoseconds. + Weight::from_parts(23_008_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -366,8 +340,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 33_430_000 picoseconds. - Weight::from_parts(34_433_000, 0) + // Minimum execution time: 34_438_000 picoseconds. + Weight::from_parts(35_514_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index d8741c939a50..71e6b696a20a 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -167,16 +167,11 @@ where }, ]); - let encoded_versioned_xcm = - VersionedXcm::V4(program).encode().try_into().map_err(|error| { - log::error!(target: "runtime::on_reap_identity", "XCM too large, error: {:?}", error); - pallet_xcm::Error::::XcmTooLarge - })?; // send - let _ = >::send_blob( + let _ = >::send( RawOrigin::Root.into(), Box::new(VersionedLocation::V4(destination)), - encoded_versioned_xcm, + Box::new(VersionedXcm::V4(program)), )?; Ok(()) } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 80bc551ba1e2..10725cecf249 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-03-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-02-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-h2rr8wx7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -60,26 +60,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 24_535_000 picoseconds. 
- Weight::from_parts(25_618_000, 0) - .saturating_add(Weight::from_parts(0, 3612)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) - /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) - /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `147` - // Estimated: `3612` - // Minimum execution time: 25_376_000 picoseconds. - Weight::from_parts(26_180_000, 0) + // Minimum execution time: 25_725_000 picoseconds. + Weight::from_parts(26_174_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -98,8 +80,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 108_786_000 picoseconds. - Weight::from_parts(112_208_000, 0) + // Minimum execution time: 113_140_000 picoseconds. + Weight::from_parts(116_204_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -118,8 +100,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `302` // Estimated: `6196` - // Minimum execution time: 105_190_000 picoseconds. - Weight::from_parts(107_140_000, 0) + // Minimum execution time: 108_571_000 picoseconds. + Weight::from_parts(110_650_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -138,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `250` // Estimated: `6196` - // Minimum execution time: 109_027_000 picoseconds. - Weight::from_parts(111_404_000, 0) + // Minimum execution time: 111_836_000 picoseconds. + Weight::from_parts(114_435_000, 0) .saturating_add(Weight::from_parts(0, 6196)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -154,24 +136,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute_blob() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_668_000 picoseconds. - Weight::from_parts(7_013_000, 0) + // Minimum execution time: 7_160_000 picoseconds. 
+ Weight::from_parts(7_477_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +151,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_740_000 picoseconds. - Weight::from_parts(1_884_000, 0) + // Minimum execution time: 1_934_000 picoseconds. + Weight::from_parts(2_053_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) @@ -201,8 +173,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `147` // Estimated: `3612` - // Minimum execution time: 30_200_000 picoseconds. - Weight::from_parts(30_768_000, 0) + // Minimum execution time: 31_123_000 picoseconds. + Weight::from_parts(31_798_000, 0) .saturating_add(Weight::from_parts(0, 3612)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(5)) @@ -223,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `327` // Estimated: `3792` - // Minimum execution time: 33_928_000 picoseconds. - Weight::from_parts(35_551_000, 0) + // Minimum execution time: 35_175_000 picoseconds. + Weight::from_parts(36_098_000, 0) .saturating_add(Weight::from_parts(0, 3792)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(4)) @@ -235,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_759_000 picoseconds. - Weight::from_parts(1_880_000, 0) + // Minimum execution time: 1_974_000 picoseconds. + Weight::from_parts(2_096_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -246,8 +218,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `22` // Estimated: `13387` - // Minimum execution time: 16_507_000 picoseconds. - Weight::from_parts(17_219_000, 0) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_170_000, 0) .saturating_add(Weight::from_parts(0, 13387)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -258,8 +230,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `26` // Estimated: `13391` - // Minimum execution time: 16_633_000 picoseconds. - Weight::from_parts(16_889_000, 0) + // Minimum execution time: 16_937_000 picoseconds. + Weight::from_parts(17_447_000, 0) .saturating_add(Weight::from_parts(0, 13391)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -270,8 +242,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `40` // Estimated: `15880` - // Minimum execution time: 19_297_000 picoseconds. - Weight::from_parts(19_820_000, 0) + // Minimum execution time: 19_157_000 picoseconds. + Weight::from_parts(19_659_000, 0) .saturating_add(Weight::from_parts(0, 15880)) .saturating_add(T::DbWeight::get().reads(6)) } @@ -289,8 +261,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `6123` - // Minimum execution time: 30_364_000 picoseconds. - Weight::from_parts(31_122_000, 0) + // Minimum execution time: 30_699_000 picoseconds. 
+ Weight::from_parts(31_537_000, 0) .saturating_add(Weight::from_parts(0, 6123)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) @@ -301,8 +273,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `69` // Estimated: `10959` - // Minimum execution time: 11_997_000 picoseconds. - Weight::from_parts(12_392_000, 0) + // Minimum execution time: 12_303_000 picoseconds. + Weight::from_parts(12_670_000, 0) .saturating_add(Weight::from_parts(0, 10959)) .saturating_add(T::DbWeight::get().reads(4)) } @@ -312,8 +284,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `33` // Estimated: `13398` - // Minimum execution time: 16_894_000 picoseconds. - Weight::from_parts(17_452_000, 0) + // Minimum execution time: 17_129_000 picoseconds. + Weight::from_parts(17_668_000, 0) .saturating_add(Weight::from_parts(0, 13398)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) @@ -332,8 +304,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `183` // Estimated: `13548` - // Minimum execution time: 39_864_000 picoseconds. - Weight::from_parts(40_859_000, 0) + // Minimum execution time: 39_960_000 picoseconds. + Weight::from_parts(41_068_000, 0) .saturating_add(Weight::from_parts(0, 13548)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) @@ -346,8 +318,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1485` - // Minimum execution time: 2_363_000 picoseconds. - Weight::from_parts(2_519_000, 0) + // Minimum execution time: 2_333_000 picoseconds. + Weight::from_parts(2_504_000, 0) .saturating_add(Weight::from_parts(0, 1485)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) @@ -358,8 +330,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `7576` // Estimated: `11041` - // Minimum execution time: 22_409_000 picoseconds. - Weight::from_parts(22_776_000, 0) + // Minimum execution time: 22_932_000 picoseconds. + Weight::from_parts(23_307_000, 0) .saturating_add(Weight::from_parts(0, 11041)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -370,8 +342,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `23` // Estimated: `3488` - // Minimum execution time: 33_551_000 picoseconds. - Weight::from_parts(34_127_000, 0) + // Minimum execution time: 34_558_000 picoseconds. + Weight::from_parts(35_299_000, 0) .saturating_add(Weight::from_parts(0, 3488)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 5d2e0f7b96f9..081a4235b779 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,7 +16,6 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; use frame_support::{assert_ok, weights::Weight}; use frame_system::RawOrigin; @@ -101,21 +100,6 @@ benchmarks! 
{
 		let versioned_msg = VersionedXcm::from(msg);
 	}: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg))
 
-	send_blob {
-		let send_origin =
-			T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-		if T::SendXcmOrigin::try_origin(send_origin.clone()).is_err() {
-			return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
-		}
-		let msg = Xcm::<()>(vec![ClearOrigin]);
-		let versioned_dest: VersionedLocation = T::reachable_dest().ok_or(
-			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
-		)?
-		.into();
-		let versioned_msg = VersionedXcm::from(msg);
-		let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
-	}: _<RuntimeOrigin<T>>(send_origin, Box::new(versioned_dest), encoded_versioned_msg)
-
 	teleport_assets {
 		let (asset, destination) = T::teleportable_asset_and_dest().ok_or(
 			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
@@ -263,19 +247,6 @@ benchmarks! {
 		let versioned_msg = VersionedXcm::from(msg);
 	}: _<RuntimeOrigin<T>>(execute_origin, Box::new(versioned_msg), Weight::MAX)
 
-	execute_blob {
-		let execute_origin =
-			T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?;
-		let origin_location = T::ExecuteXcmOrigin::try_origin(execute_origin.clone())
-			.map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?;
-		let msg = Xcm(vec![ClearOrigin]);
-		if !T::XcmExecuteFilter::contains(&(origin_location, msg.clone())) {
-			return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))
-		}
-		let versioned_msg = VersionedXcm::from(msg);
-		let encoded_versioned_msg = versioned_msg.encode().try_into().unwrap();
-	}: _<RuntimeOrigin<T>>(execute_origin, encoded_versioned_msg, Weight::MAX)
-
 	force_xcm_version {
 		let loc = T::reachable_dest().ok_or(
 			BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)),
diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs
index f6c301d5b04e..af3b66121ea1 100644
--- a/polkadot/xcm/pallet-xcm/src/lib.rs
+++ b/polkadot/xcm/pallet-xcm/src/lib.rs
@@ -50,8 +50,8 @@ use sp_runtime::{
 use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec};
 use xcm::{latest::QueryResponseInfo, prelude::*};
 use xcm_builder::{
-	ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController,
-	QueryControllerWeightInfo, SendController, SendControllerWeightInfo,
+	ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo,
+	SendController, SendControllerWeightInfo,
 };
 use xcm_executor::{
 	traits::{
@@ -87,8 +87,6 @@ pub trait WeightInfo {
 	fn new_query() -> Weight;
 	fn take_response() -> Weight;
 	fn claim_assets() -> Weight;
-	fn execute_blob() -> Weight;
-	fn send_blob() -> Weight;
 }
 
 /// fallback implementation
@@ -173,14 +171,6 @@ impl WeightInfo for TestWeightInfo {
 	fn claim_assets() -> Weight {
 		Weight::from_parts(100_000_000, 0)
 	}
-
-	fn execute_blob() -> Weight {
-		Weight::from_parts(100_000_000, 0)
-	}
-
-	fn send_blob() -> Weight {
-		Weight::from_parts(100_000_000, 0)
-	}
 }
 
 #[frame_support::pallet]
@@ -296,49 +286,76 @@ pub mod pallet {
 	}
 
 	impl<T: Config> ExecuteControllerWeightInfo for Pallet<T> {
-		fn execute_blob() -> Weight {
-			T::WeightInfo::execute_blob()
+		fn execute() -> Weight {
+			T::WeightInfo::execute()
 		}
 	}
 
 	impl<T: Config> ExecuteController<OriginFor<T>, <T as Config>::RuntimeCall> for Pallet<T> {
 		type WeightInfo = Self;
-		fn execute_blob(
+		fn execute(
 			origin: OriginFor<T>,
-			encoded_message: BoundedVec<u8, MaxXcmEncodedSize>,
+			message: Box<VersionedXcm<<T as Config>::RuntimeCall>>,
 			max_weight: Weight,
 		) -> Result<Weight, DispatchErrorWithPostInfo> {
-			let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?;
origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let message = - VersionedXcm::<::RuntimeCall>::decode(&mut &encoded_message[..]) - .map_err(|error| { - log::error!(target: "xcm::execute_blob", "Unable to decode XCM, error: {:?}", error); - Error::::UnableToDecode - })?; - Self::execute_base(origin_location, Box::new(message), max_weight) + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); + let outcome = (|| { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let mut hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); + let (origin_location, message) = value; + Ok(T::XcmExecutor::prepare_and_execute( + origin_location, + message, + &mut hash, + max_weight, + max_weight, + )) + })() + .map_err(|e: DispatchError| { + e.with_weight(::execute()) + })?; + + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete.with_weight( + weight_used.saturating_add( + ::execute(), + ), + ) + })?; + Ok(weight_used) } } impl SendControllerWeightInfo for Pallet { - fn send_blob() -> Weight { - T::WeightInfo::send_blob() + fn send() -> Weight { + T::WeightInfo::send() } } impl SendController> for Pallet { type WeightInfo = Self; - fn send_blob( + fn send( origin: OriginFor, dest: Box, - encoded_message: BoundedVec, + message: Box>, ) -> Result { let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let message = - VersionedXcm::<()>::decode(&mut &encoded_message[..]).map_err(|error| { - log::error!(target: "xcm::send_blob", "Unable to decode XCM, error: {:?}", error); - Error::::UnableToDecode - })?; - Self::send_base(origin_location, dest, Box::new(message)) + let interior: Junctions = + origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) + .map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) } } @@ -547,13 +564,6 @@ pub mod pallet { /// Local XCM execution incomplete. #[codec(index = 24)] LocalExecutionIncomplete, - /// Could not decode XCM. - #[codec(index = 25)] - UnableToDecode, - /// XCM encoded length is too large. - /// Returned when an XCM encoded length is larger than `MaxXcmEncodedSize`. - #[codec(index = 26)] - XcmTooLarge, } impl From for Error { @@ -890,72 +900,15 @@ pub mod pallet { } } - impl Pallet { - /// Underlying logic for both [`execute_blob`] and [`execute`]. 
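// Editor's sketch (illustration only, not part of the patch): with the `_blob`
// variants folded back into `execute`/`send`, a caller now hands the controller
// a typed `Box<VersionedXcm<_>>` directly, so the pallet no longer decodes
// SCALE bytes itself (which is why the `UnableToDecode`/`XcmTooLarge` errors
// below are removed). Assuming a runtime `T` configured like the test mock:
//
//     let message = Box::new(VersionedXcm::from(Xcm(vec![ClearOrigin])));
//     let weight_used = <Pallet<T> as ExecuteController<_, _>>::execute(
//         origin, // e.g. RuntimeOrigin::signed(ALICE).into()
//         message,
//         Weight::MAX, // max_weight allowed for execution
//     )?;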
- fn execute_base( - origin_location: Location, - message: Box::RuntimeCall>>, - max_weight: Weight, - ) -> Result { - log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); - let outcome = (|| { - let mut hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - Ok(T::XcmExecutor::prepare_and_execute( - origin_location, - message, - &mut hash, - max_weight, - max_weight, - )) - })() - .map_err(|e: DispatchError| e.with_weight(T::WeightInfo::execute()))?; - - Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - let weight_used = outcome.weight_used(); - outcome.ensure_complete().map_err(|error| { - log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); - Error::::LocalExecutionIncomplete - .with_weight(weight_used.saturating_add(T::WeightInfo::execute())) - })?; - Ok(weight_used) - } - - /// Underlying logic for both [`send_blob`] and [`send`]. - fn send_base( - origin_location: Location, - dest: Box, - message: Box>, - ) -> Result { - let interior: Junctions = - origin_location.clone().try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = Location::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = Self::send_xcm(interior, dest.clone(), message.clone()) - .map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); - Ok(message_id) - } - } - #[pallet::call(weight(::WeightInfo))] impl Pallet { - /// WARNING: DEPRECATED. `send` will be removed after June 2024. Use `send_blob` instead. - #[allow(deprecated)] - #[deprecated(note = "`send` will be removed after June 2024. Use `send_blob` instead.")] #[pallet::call_index(0)] pub fn send( origin: OriginFor, dest: Box, message: Box>, ) -> DispatchResult { - let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - Self::send_base(origin_location, dest, message)?; + >::send(origin, dest, message)?; Ok(()) } @@ -1052,13 +1005,6 @@ pub mod pallet { /// No more than `max_weight` will be used in its attempted execution. If this is less than /// the maximum amount of weight that the message could take to be executed, then no /// execution attempt will be made. - /// - /// WARNING: DEPRECATED. `execute` will be removed after June 2024. Use `execute_blob` - /// instead. - #[allow(deprecated)] - #[deprecated( - note = "`execute` will be removed after June 2024. Use `execute_blob` instead." - )] #[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -1066,8 +1012,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let weight_used = Self::execute_base(origin_location, message, max_weight)?; + let weight_used = + >::execute(origin, message, max_weight)?; Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } @@ -1362,47 +1308,6 @@ pub mod pallet { Ok(()) } - /// Execute an XCM from a local, signed, origin. - /// - /// An event is deposited indicating whether the message could be executed completely - /// or only partially. 
- /// - /// No more than `max_weight` will be used in its attempted execution. If this is less than - /// the maximum amount of weight that the message could take to be executed, then no - /// execution attempt will be made. - /// - /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. - #[pallet::call_index(13)] - #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute_blob()))] - pub fn execute_blob( - origin: OriginFor, - encoded_message: BoundedVec, - max_weight: Weight, - ) -> DispatchResultWithPostInfo { - let weight_used = >::execute_blob( - origin, - encoded_message, - max_weight, - )?; - Ok(Some(weight_used.saturating_add(T::WeightInfo::execute_blob())).into()) - } - - /// Send an XCM from a local, signed, origin. - /// - /// The destination, `dest`, will receive this message with a `DescendOrigin` instruction - /// that makes the origin of the message be the origin on this system. - /// - /// The message is passed in encoded. It needs to be decodable as a [`VersionedXcm`]. - #[pallet::call_index(14)] - pub fn send_blob( - origin: OriginFor, - dest: Box, - encoded_message: BoundedVec, - ) -> DispatchResult { - >::send_blob(origin, dest, encoded_message)?; - Ok(()) - } - /// Transfer assets from the local chain to the destination chain using explicit transfer /// types for assets and fees. /// @@ -1451,7 +1356,7 @@ pub mod pallet { /// - `custom_xcm_on_dest`: The XCM to be executed on `dest` chain as the last step of the /// transfer, which also determines what happens to the assets on the destination chain. /// - `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase. - #[pallet::call_index(15)] + #[pallet::call_index(13)] #[pallet::weight(T::WeightInfo::transfer_assets())] pub fn transfer_assets_using_type_and_then( origin: OriginFor, diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 1147635081a4..8faf16e0d2a9 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -20,10 +20,10 @@ pub(crate) mod assets_transfer; use crate::{ mock::*, pallet::SupportedVersion, AssetTraps, Config, CurrentMigration, Error, - LatestVersionedLocation, Pallet, Queries, QueryStatus, VersionDiscoveryQueue, - VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, WeightInfo, + ExecuteControllerWeightInfo, LatestVersionedLocation, Pallet, Queries, QueryStatus, + VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, VersionNotifyTargets, + WeightInfo, }; -use codec::Encode; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, traits::{Currency, Hooks}, @@ -305,12 +305,11 @@ fn send_works() { ]); let versioned_dest = Box::new(RelayLocation::get().into()); - let versioned_message = VersionedXcm::from(message.clone()); - let encoded_versioned_message = versioned_message.encode().try_into().unwrap(); - assert_ok!(XcmPallet::send_blob( + let versioned_message = Box::new(VersionedXcm::from(message.clone())); + assert_ok!(XcmPallet::send( RuntimeOrigin::signed(ALICE), versioned_dest, - encoded_versioned_message + versioned_message )); let sent_message = Xcm(Some(DescendOrigin(sender.clone().try_into().unwrap())) .into_iter() @@ -342,16 +341,16 @@ fn send_fails_when_xcm_router_blocks() { ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let message = Xcm::<()>(vec![ + let message = Xcm(vec![ 
ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), buy_execution((Parent, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: sender }, ]); assert_noop!( - XcmPallet::send_blob( + XcmPallet::send( RuntimeOrigin::signed(ALICE), Box::new(Location::ancestor(8).into()), - VersionedXcm::from(message.clone()).encode().try_into().unwrap(), + Box::new(VersionedXcm::from(message.clone())), ), crate::Error::::SendFailure ); @@ -372,16 +371,13 @@ fn execute_withdraw_to_deposit_works() { let weight = BaseXcmWeight::get() * 3; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -403,21 +399,18 @@ fn trapped_assets_can_be_claimed() { let weight = BaseXcmWeight::get() * 6; let dest: Location = Junction::AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ WithdrawAsset((Here, SEND_AMOUNT).into()), buy_execution((Here, SEND_AMOUNT)), // Don't propagated the error into the result. - SetErrorHandler(Xcm::(vec![ClearError])), + SetErrorHandler(Xcm(vec![ClearError])), // This will make an error. Trap(0), // This would succeed, but we never get to it. DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); @@ -444,16 +437,13 @@ fn trapped_assets_can_be_claimed() { assert_eq!(trapped, expected); let weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest.clone() }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight )); @@ -463,16 +453,13 @@ fn trapped_assets_can_be_claimed() { // Can't claim twice. assert_err_ignore_postinfo!( - XcmPallet::execute_blob( + XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, buy_execution((Here, SEND_AMOUNT)), DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight ), Error::::LocalExecutionIncomplete @@ -489,9 +476,9 @@ fn claim_assets_works() { let trapping_program = Xcm::::builder_unsafe().withdraw_asset((Here, SEND_AMOUNT)).build(); // Even though assets are trapped, the extrinsic returns success. 
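// (Editor's note, an assumption about the helper rather than something stated
// in this patch: `builder_unsafe` builds the program without the fee-payment
// checks of the regular builder, which is what lets this test construct a
// program that withdraws assets without depositing them, trapping them.)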
- assert_ok!(XcmPallet::execute_blob( + assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::V4(trapping_program).encode().try_into().unwrap(), + Box::new(VersionedXcm::V4(trapping_program)), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); @@ -544,9 +531,9 @@ fn incomplete_execute_reverts_side_effects() { assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); let assets: Assets = (Here, amount_to_send).into(); - let result = XcmPallet::execute_blob( + let result = XcmPallet::execute( RuntimeOrigin::signed(ALICE), - VersionedXcm::from(Xcm::(vec![ + Box::new(VersionedXcm::from(Xcm(vec![ // Withdraw + BuyExec + Deposit should work WithdrawAsset(assets.clone()), buy_execution(assets.inner()[0].clone()), @@ -554,10 +541,7 @@ fn incomplete_execute_reverts_side_effects() { // Withdrawing once more will fail because of InsufficientBalance, and we expect to // revert the effects of the above instructions as well WithdrawAsset(assets), - ])) - .encode() - .try_into() - .unwrap(), + ]))), weight, ); // all effects are reverted and balances unchanged for either sender or receiver @@ -569,7 +553,7 @@ fn incomplete_execute_reverts_side_effects() { Err(sp_runtime::DispatchErrorWithPostInfo { post_info: frame_support::dispatch::PostDispatchInfo { actual_weight: Some( - <::WeightInfo>::execute_blob() + weight + as ExecuteControllerWeightInfo>::execute() + weight ), pays_fee: frame_support::dispatch::Pays::Yes, }, diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 198020ea1261..513dfe5501ba 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -48,9 +48,6 @@ mod tests; /// Maximum nesting level for XCM decoding. pub const MAX_XCM_DECODE_DEPTH: u32 = 8; -/// Maximum encoded size. -/// See `decoding_respects_limit` test for more reasoning behind this value. -pub const MAX_XCM_ENCODED_SIZE: u32 = 12402; /// A version of XCM. 
pub type Version = u32; diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 6635408282e4..30ee485589a2 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -1488,21 +1488,7 @@ mod tests { let encoded = big_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let mut many_assets = Assets::new(); - for index in 0..MAX_ITEMS_IN_ASSETS { - many_assets.push((GeneralIndex(index as u128), 1u128).into()); - } - - let full_xcm_pass = - Xcm::<()>(vec![ - TransferAsset { assets: many_assets, beneficiary: Here.into() }; - MAX_INSTRUCTIONS_TO_DECODE as usize - ]); - let encoded = full_xcm_pass.encode(); - assert_eq!(encoded.len(), 12402); - assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); - - let nested_xcm_fail = Xcm::<()>(vec![ + let nested_xcm = Xcm::<()>(vec![ DepositReserveAsset { assets: All.into(), dest: Here.into(), @@ -1510,10 +1496,10 @@ mod tests { }; (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize ]); - let encoded = nested_xcm_fail.encode(); + let encoded = nested_xcm.encode(); assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); - let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm_fail); 64]); + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); let encoded = even_more_nested_xcm.encode(); assert_eq!(encoded.len(), 342530); // This should not decode since the limit is 100 diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 6bdde2a967de..04b19eaa5870 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,7 +21,6 @@ use frame_support::{ dispatch::{DispatchErrorWithPostInfo, WithPostDispatchInfo}, pallet_prelude::DispatchError, - parameter_types, BoundedVec, }; use sp_std::boxed::Box; use xcm::prelude::*; @@ -42,12 +41,8 @@ impl Controller f /// Weight functions needed for [`ExecuteController`]. pub trait ExecuteControllerWeightInfo { - /// Weight for [`ExecuteController::execute_blob`] - fn execute_blob() -> Weight; -} - -parameter_types! { - pub const MaxXcmEncodedSize: u32 = xcm::MAX_XCM_ENCODED_SIZE; + /// Weight for [`ExecuteController::execute`] + fn execute() -> Weight; } /// Execute an XCM locally, for a given origin. @@ -66,19 +61,19 @@ pub trait ExecuteController { /// # Parameters /// /// - `origin`: the origin of the call. - /// - `msg`: the encoded XCM to be executed, should be decodable as a [`VersionedXcm`] + /// - `message`: the XCM program to be executed. /// - `max_weight`: the maximum weight that can be consumed by the execution. - fn execute_blob( + fn execute( origin: Origin, - message: BoundedVec, + message: Box>, max_weight: Weight, ) -> Result; } /// Weight functions needed for [`SendController`]. pub trait SendControllerWeightInfo { - /// Weight for [`SendController::send_blob`] - fn send_blob() -> Weight; + /// Weight for [`SendController::send`] + fn send() -> Weight; } /// Send an XCM from a given origin. @@ -98,11 +93,11 @@ pub trait SendController { /// /// - `origin`: the origin of the call. /// - `dest`: the destination of the message. - /// - `msg`: the encoded XCM to be sent, should be decodable as a [`VersionedXcm`] - fn send_blob( + /// - `msg`: the XCM to be sent. 
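+	/// A minimal usage sketch (editor's illustration, not part of this patch;
+	/// names assume a runtime where `XcmPallet` implements this trait, as in
+	/// the pallet-xcm tests above):
+	///
+	/// ```ignore
+	/// let dest = Box::new(VersionedLocation::from(Location::parent()));
+	/// let message = Box::new(VersionedXcm::from(Xcm::<()>(vec![ClearOrigin])));
+	/// let message_id = XcmPallet::send(origin, dest, message)?;
+	/// ```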
+ fn send( origin: Origin, dest: Box, - message: BoundedVec, + message: Box>, ) -> Result; } @@ -142,35 +137,35 @@ pub trait QueryController: QueryHandler { impl ExecuteController for () { type WeightInfo = (); - fn execute_blob( + fn execute( _origin: Origin, - _message: BoundedVec, + _message: Box>, _max_weight: Weight, ) -> Result { - Err(DispatchError::Other("ExecuteController::execute_blob not implemented") + Err(DispatchError::Other("ExecuteController::execute not implemented") .with_weight(Weight::zero())) } } impl ExecuteControllerWeightInfo for () { - fn execute_blob() -> Weight { + fn execute() -> Weight { Weight::zero() } } impl SendController for () { type WeightInfo = (); - fn send_blob( + fn send( _origin: Origin, _dest: Box, - _message: BoundedVec, + _message: Box>, ) -> Result { Ok(Default::default()) } } impl SendControllerWeightInfo for () { - fn send_blob() -> Weight { + fn send() -> Weight { Weight::zero() } } diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 977da9a55de7..1ba38d0db836 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -44,7 +44,7 @@ pub use barriers::{ mod controller; pub use controller::{ - Controller, ExecuteController, ExecuteControllerWeightInfo, MaxXcmEncodedSize, QueryController, + Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs index bf3c00b3ff1f..20fdd9a243d1 100644 --- a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -14,9 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; -use crate::parachain::RuntimeHoldReason; -use frame_support::{derive_impl, parameter_types}; +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent, RuntimeHoldReason}; +use frame_support::{derive_impl, parameter_types, traits::Contains}; parameter_types! { pub Schedule: pallet_contracts::Schedule = Default::default(); @@ -29,5 +28,14 @@ impl pallet_contracts::Config for Runtime { type Currency = Balances; type Schedule = Schedule; type Time = super::Timestamp; + type CallFilter = CallFilter; type Xcm = pallet_xcm::Pallet; } + +/// In this mock, we only allow other contract calls via XCM. +pub struct CallFilter; +impl Contains for CallFilter { + fn contains(call: &RuntimeCall) -> bool { + matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. 
})) + } +} diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index 5632f75e7873..e7d1f6279aa3 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -22,7 +22,10 @@ use crate::{ relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, }; use codec::{Decode, Encode}; -use frame_support::traits::{fungibles::Mutate, Currency}; +use frame_support::{ + assert_err, + traits::{fungibles::Mutate, Currency}, +}; use pallet_contracts::{test_utils::builder::*, Code}; use pallet_contracts_fixtures::compile_module; use pallet_contracts_uapi::ReturnErrorCode; @@ -81,7 +84,7 @@ fn test_xcm_execute() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build(); assert_eq!(result.gas_consumed, result.gas_required); @@ -118,7 +121,7 @@ fn test_xcm_execute_incomplete() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build(); assert_eq!(result.gas_consumed, result.gas_required); @@ -129,6 +132,26 @@ fn test_xcm_execute_incomplete() { }); } +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm::builder_unsafe() + .transact(OriginKind::Native, Weight::MAX, call.encode()) + .build(); + let result = bare_call(contract_addr.clone()) + .data(VersionedXcm::V4(message).encode()) + .build() + .result; + assert_err!(result, frame_system::Error::::CallFiltered); + }); +} + #[test] fn test_xcm_execute_reentrant_call() { MockNet::reset(); @@ -151,7 +174,7 @@ fn test_xcm_execute_reentrant_call() { .build(); let result = bare_call(contract_addr.clone()) - .data(VersionedXcm::V4(message).encode().encode()) + .data(VersionedXcm::V4(message).encode()) .build_and_unwrap_result(); assert_return_code!(&result, ReturnErrorCode::XcmExecutionFailed); @@ -182,7 +205,7 @@ fn test_xcm_send() { .build(); let result = bare_call(contract_addr.clone()) - .data((dest, VersionedXcm::V4(message).encode()).encode()) + .data((dest, VersionedXcm::V4(message)).encode()) .build_and_unwrap_result(); let mut data = &result.data[..]; diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index b381fd2dc4f0..20cf7d1651cc 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -307,9 +307,6 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. - /// - /// This filter does not apply to XCM transact calls. To impose restrictions on XCM transact - /// calls, you must configure them separately within the XCM pallet itself. 
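	/// Editor's note (illustration only): after this change the filter is also
	/// applied to `Transact` calls inside contract-initiated XCM programs. A
	/// restrictive filter can look like the one in the mock network above,
	/// assuming a runtime whose `RuntimeCall` has a `Contracts` variant:
	///
	/// ```ignore
	/// pub struct CallFilter;
	/// impl Contains<RuntimeCall> for CallFilter {
	///     fn contains(call: &RuntimeCall) -> bool {
	///         // only allow contract-to-contract calls via XCM
	///         matches!(call, RuntimeCall::Contracts(pallet_contracts::Call::call { .. }))
	///     }
	/// }
	/// ```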
#[pallet::no_default_bounds] type CallFilter: Contains<::RuntimeCall>; diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 28a08ab0224d..160dfa0d2f36 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -25,8 +25,12 @@ use crate::{ }; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::DispatchInfo, ensure, pallet_prelude::DispatchResultWithPostInfo, parameter_types, - traits::Get, weights::Weight, + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, }; use pallet_contracts_proc_macro::define_env; use pallet_contracts_uapi::{CallFlags, ReturnFlags}; @@ -37,6 +41,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -371,6 +378,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. 
pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -2082,13 +2112,16 @@ pub mod env { msg_len: u32, ) -> Result { use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; - let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + ensure_executable::(&message)?; let execute_weight = - <::Xcm as ExecuteController<_, _>>::WeightInfo::execute_blob(); + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); let dispatch_info = DispatchInfo { weight, ..Default::default() }; @@ -2097,9 +2130,9 @@ pub mod env { RuntimeCosts::CallXcmExecute, |ctx| { let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - let weight_used = <::Xcm>::execute_blob( + let weight_used = <::Xcm>::execute( origin, - message, + Box::new(message), weight.saturating_sub(execute_weight), )?; @@ -2119,18 +2152,19 @@ pub mod env { msg_len: u32, output_ptr: u32, ) -> Result { - use xcm::VersionedLocation; + use xcm::{VersionedLocation, VersionedXcm}; use xcm_builder::{SendController, SendControllerWeightInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; let dest: VersionedLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; - let message = ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; - let weight = <::Xcm as SendController<_>>::WeightInfo::send_blob(); + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); - match <::Xcm>::send_blob(origin, dest.into(), message) { + match <::Xcm>::send(origin, dest.into(), message.into()) { Ok(message_id) => { ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; Ok(ReturnErrorCode::Success) diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs index 459cb59bead9..92065eda5d63 100644 --- a/substrate/frame/contracts/uapi/src/host.rs +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -790,7 +790,7 @@ pub trait HostFn { /// /// # Parameters /// - /// - `dest`: The XCM destination, should be decodable as [MultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), + /// - `dest`: The XCM destination, should be decodable as [VersionedLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedLocation.html), /// traps otherwise. /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), /// traps otherwise. From a633e954f3b88697aa797d9792e8a5b5cf310b7e Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 25 Apr 2024 08:26:16 +0300 Subject: [PATCH 52/74] Bridge: make some headers submissions free (#4102) supersedes https://github.com/paritytech/parity-bridges-common/pull/2873 Draft because of couple of TODOs: - [x] fix remaining TODOs; - [x] double check that all changes from https://github.com/paritytech/parity-bridges-common/pull/2873 are correctly ported; - [x] create a separate PR (on top of that one or a follow up?) 
for https://github.com/paritytech/polkadot-sdk/tree/sv-try-new-bridge-fees; - [x] fix compilation issues (haven't checked, but there should be many). --------- Co-authored-by: Adrian Catangiu --- .gitlab/pipeline/zombienet.yml | 4 +- Cargo.lock | 9 + bridges/bin/runtime-common/Cargo.toml | 2 + .../extensions/check_obsolete_extension.rs | 577 ++++++++++++++-- .../src/extensions/priority_calculator.rs | 433 ++++++++---- .../extensions/refund_relayer_extension.rs | 627 +++++++++++++----- bridges/bin/runtime-common/src/mock.rs | 4 +- .../chain-bridge-hub-cumulus/src/lib.rs | 3 + .../chains/chain-bridge-hub-kusama/src/lib.rs | 1 + .../chain-bridge-hub-polkadot/src/lib.rs | 1 + .../chains/chain-bridge-hub-rococo/src/lib.rs | 5 +- .../chain-bridge-hub-westend/src/lib.rs | 9 +- bridges/modules/grandpa/src/call_ext.rs | 358 +++++++++- bridges/modules/grandpa/src/lib.rs | 249 +++++-- bridges/modules/grandpa/src/mock.rs | 6 +- bridges/modules/grandpa/src/weights_ext.rs | 58 ++ bridges/modules/parachains/src/call_ext.rs | 299 +++++++-- bridges/modules/parachains/src/lib.rs | 344 ++++++++-- bridges/modules/parachains/src/mock.rs | 11 +- bridges/modules/parachains/src/weights_ext.rs | 27 +- bridges/primitives/parachains/src/lib.rs | 19 + bridges/primitives/runtime/src/chain.rs | 20 + .../relays/client-substrate/src/test_chain.rs | 1 + .../bridge_hub_rococo_local_network.toml | 8 +- .../bridge_hub_westend_local_network.toml | 8 +- .../rococo-westend/bridges_rococo_westend.sh | 98 +++ .../environments/rococo-westend/explorers.sh | 11 + .../environments/rococo-westend/helper.sh | 8 +- .../environments/rococo-westend/spawn.sh | 4 +- .../rococo-westend/start_relayer.sh | 26 +- .../js-helpers/native-asset-balance.js | 12 + .../roc-reaches-westend.zndsl | 6 +- .../roc-relayer-balance-does-not-change.zndsl | 11 + .../testing/tests/0001-asset-transfer/run.sh | 6 + .../wnd-reaches-rococo.zndsl | 6 +- .../wnd-relayer-balance-does-not-change.zndsl | 11 + .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 + .../src/bridge_common_config.rs | 6 +- .../src/bridge_to_bulletin_config.rs | 27 +- .../src/bridge_to_westend_config.rs | 34 +- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 47 +- .../bridge-hub-rococo/src/weights/mod.rs | 20 + .../bridge-hub-rococo/tests/tests.rs | 45 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 + .../src/bridge_to_rococo_config.rs | 34 +- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 34 +- .../bridge-hub-westend/src/weights/mod.rs | 20 + .../bridge-hub-westend/tests/tests.rs | 33 +- .../src/test_cases/from_grandpa_chain.rs | 262 +++++++- .../src/test_cases/from_parachain.rs | 298 +++++++++ .../test-utils/src/test_cases/helpers.rs | 28 + .../src/test_data/from_grandpa_chain.rs | 69 +- .../src/test_data/from_parachain.rs | 59 +- prdoc/pr_4102.prdoc | 43 ++ 54 files changed, 3730 insertions(+), 615 deletions(-) create mode 100644 bridges/modules/grandpa/src/weights_ext.rs create mode 100755 bridges/testing/environments/rococo-westend/explorers.sh create mode 100644 bridges/testing/framework/js-helpers/native-asset-balance.js create mode 100644 bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl create mode 100644 bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl create mode 100644 prdoc/pr_4102.prdoc diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 52948e1eb719..e306cb43c027 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -12,4 +12,6 @@ include: # 
polkadot tests - .gitlab/pipeline/zombienet/polkadot.yml # bridges tests - - .gitlab/pipeline/zombienet/bridges.yml + # TODO: https://github.com/paritytech/parity-bridges-common/pull/2884 + # commenting until we have a new relatye, compatible with updated fees scheme + # - .gitlab/pipeline/zombienet/bridges.yml diff --git a/Cargo.lock b/Cargo.lock index ad7729d4b30e..d64800fb085e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2153,6 +2153,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", + "tuplex", ] [[package]] @@ -2311,6 +2312,7 @@ dependencies = [ "static_assertions", "substrate-wasm-builder", "testnet-parachains-constants", + "tuplex", "westend-runtime-constants", ] @@ -2349,6 +2351,7 @@ dependencies = [ "staging-xcm", "staging-xcm-builder", "static_assertions", + "tuplex", ] [[package]] @@ -22046,6 +22049,12 @@ dependencies = [ "utf-8", ] +[[package]] +name = "tuplex" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa" + [[package]] name = "twox-hash" version = "1.6.3" diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 67b91a16a302..74049031afe6 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -16,6 +16,7 @@ hash-db = { version = "0.16.0", default-features = false } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } static_assertions = { version = "1.1", optional = true } +tuplex = { version = "0.1", default-features = false } # Bridge dependencies @@ -82,6 +83,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-trie/std", + "tuplex/std", "xcm-builder/std", "xcm/std", ] diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 4b0c052df800..2c152aef6822 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -18,55 +18,229 @@ //! obsolete (duplicated) data or do not pass some additional pallet-specific //! checks. -use crate::messages_call_ext::MessagesCallSubType; -use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; -use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; -use sp_runtime::transaction_validity::TransactionValidity; +use crate::{ + extensions::refund_relayer_extension::RefundableParachainId, + messages_call_ext::MessagesCallSubType, +}; +use bp_relayers::ExplicitOrAccountParams; +use bp_runtime::Parachain; +use pallet_bridge_grandpa::{ + BridgedBlockNumber, CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, +}; +use pallet_bridge_parachains::{ + CallSubType as ParachainsCallSubtype, SubmitParachainHeadsHelper, SubmitParachainHeadsInfo, +}; +use pallet_bridge_relayers::Pallet as RelayersPallet; +use sp_runtime::{ + traits::{Get, PhantomData, UniqueSaturatedInto}, + transaction_validity::{TransactionPriority, TransactionValidity, ValidTransactionBuilder}, +}; /// A duplication of the `FilterCall` trait. /// /// We need this trait in order to be able to implement it for the messages pallet, /// since the implementation is done outside of the pallet crate. -pub trait BridgeRuntimeFilterCall { - /// Checks if a runtime call is valid. 
- fn validate(call: &Call) -> TransactionValidity; +pub trait BridgeRuntimeFilterCall { + /// Data that may be passed from the validate to `post_dispatch`. + type ToPostDispatch; + /// Called during validation. Needs to checks whether a runtime call, submitted + /// by the `who` is valid. `who` may be `None` if transaction is not signed + /// by a regular account. + fn validate(who: &AccountId, call: &Call) -> (Self::ToPostDispatch, TransactionValidity); + /// Called after transaction is dispatched. + fn post_dispatch(_who: &AccountId, _has_failed: bool, _to_post_dispatch: Self::ToPostDispatch) { + } +} + +/// Wrapper for the bridge GRANDPA pallet that checks calls for obsolete submissions +/// and also boosts transaction priority if it has submitted by registered relayer. +/// The boost is computed as +/// `(BundledHeaderNumber - 1 - BestFinalizedHeaderNumber) * Priority::get()`. +/// The boost is only applied if submitter has active registration in the relayers +/// pallet. +pub struct CheckAndBoostBridgeGrandpaTransactions( + PhantomData<(T, I, Priority, SlashAccount)>, +); + +impl, SlashAccount: Get> + BridgeRuntimeFilterCall + for CheckAndBoostBridgeGrandpaTransactions +where + T: pallet_bridge_relayers::Config + pallet_bridge_grandpa::Config, + T::RuntimeCall: GrandpaCallSubType, +{ + // bridged header number, bundled in transaction + type ToPostDispatch = Option>; + + fn validate( + who: &T::AccountId, + call: &T::RuntimeCall, + ) -> (Self::ToPostDispatch, TransactionValidity) { + match GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) { + Ok(Some(our_tx)) => { + let to_post_dispatch = Some(our_tx.base.block_number); + let total_priority_boost = + compute_priority_boost::(who, our_tx.improved_by); + ( + to_post_dispatch, + ValidTransactionBuilder::default().priority(total_priority_boost).build(), + ) + }, + Ok(None) => (None, ValidTransactionBuilder::default().build()), + Err(e) => (None, Err(e)), + } + } + + fn post_dispatch( + relayer: &T::AccountId, + has_failed: bool, + bundled_block_number: Self::ToPostDispatch, + ) { + // we are only interested in associated pallet submissions + let Some(bundled_block_number) = bundled_block_number else { return }; + // we are only interested in failed or unneeded transactions + let has_failed = + has_failed || !SubmitFinalityProofHelper::::was_successful(bundled_block_number); + + if !has_failed { + return + } + + // let's slash registered relayer + RelayersPallet::::slash_and_deregister( + relayer, + ExplicitOrAccountParams::Explicit(SlashAccount::get()), + ); + } +} + +/// Wrapper for the bridge parachains pallet that checks calls for obsolete submissions +/// and also boosts transaction priority if it has submitted by registered relayer. +/// The boost is computed as +/// `(BundledHeaderNumber - 1 - BestKnownHeaderNumber) * Priority::get()`. +/// The boost is only applied if submitter has active registration in the relayers +/// pallet. 
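+/// As a worked example (editor's illustration, consistent with the unit tests
+/// at the end of this file): with `Priority = 1_000`, the best known header at
+/// number 100 and a bundled header at number 200, a registered relayer gets a
+/// priority boost of `(200 - 1 - 100) * 1_000 = 99_000`, while an unregistered
+/// relayer gets no boost at all. The same arithmetic applies to the GRANDPA
+/// wrapper above.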
+pub struct CheckAndBoostBridgeParachainsTransactions( + PhantomData<(T, RefPara, Priority, SlashAccount)>, +); + +impl, SlashAccount: Get> + BridgeRuntimeFilterCall + for CheckAndBoostBridgeParachainsTransactions +where + T: pallet_bridge_relayers::Config + pallet_bridge_parachains::Config, + RefPara: RefundableParachainId, + T::RuntimeCall: ParachainsCallSubtype, +{ + // bridged header number, bundled in transaction + type ToPostDispatch = Option; + + fn validate( + who: &T::AccountId, + call: &T::RuntimeCall, + ) -> (Self::ToPostDispatch, TransactionValidity) { + match ParachainsCallSubtype::::check_obsolete_submit_parachain_heads( + call, + ) { + Ok(Some(our_tx)) if our_tx.base.para_id.0 == RefPara::BridgedChain::PARACHAIN_ID => { + let to_post_dispatch = Some(our_tx.base); + let total_priority_boost = + compute_priority_boost::(&who, our_tx.improved_by); + ( + to_post_dispatch, + ValidTransactionBuilder::default().priority(total_priority_boost).build(), + ) + }, + Ok(_) => (None, ValidTransactionBuilder::default().build()), + Err(e) => (None, Err(e)), + } + } + + fn post_dispatch(relayer: &T::AccountId, has_failed: bool, maybe_update: Self::ToPostDispatch) { + // we are only interested in associated pallet submissions + let Some(update) = maybe_update else { return }; + // we are only interested in failed or unneeded transactions + let has_failed = has_failed || + !SubmitParachainHeadsHelper::::was_successful(&update); + + if !has_failed { + return + } + + // let's slash registered relayer + RelayersPallet::::slash_and_deregister( + relayer, + ExplicitOrAccountParams::Explicit(SlashAccount::get()), + ); + } } -impl BridgeRuntimeFilterCall for pallet_bridge_grandpa::Pallet +impl BridgeRuntimeFilterCall + for pallet_bridge_grandpa::Pallet where T: pallet_bridge_grandpa::Config, T::RuntimeCall: GrandpaCallSubType, { - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) + type ToPostDispatch = (); + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ( + (), + GrandpaCallSubType::::check_obsolete_submit_finality_proof(call) + .and_then(|_| ValidTransactionBuilder::default().build()), + ) } } -impl BridgeRuntimeFilterCall +impl BridgeRuntimeFilterCall for pallet_bridge_parachains::Pallet where T: pallet_bridge_parachains::Config, T::RuntimeCall: ParachainsCallSubtype, { - fn validate(call: &T::RuntimeCall) -> TransactionValidity { - ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) + type ToPostDispatch = (); + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ( + (), + ParachainsCallSubtype::::check_obsolete_submit_parachain_heads(call) + .and_then(|_| ValidTransactionBuilder::default().build()), + ) } } -impl, I: 'static> BridgeRuntimeFilterCall - for pallet_bridge_messages::Pallet +impl, I: 'static> + BridgeRuntimeFilterCall for pallet_bridge_messages::Pallet where T::RuntimeCall: MessagesCallSubType, { + type ToPostDispatch = (); /// Validate messages in order to avoid "mining" messages delivery and delivery confirmation /// transactions, that are delivering outdated messages/confirmations. Without this validation, /// even honest relayers may lose their funds if there are multiple relays running and /// submitting the same messages/confirmations. 
- fn validate(call: &T::RuntimeCall) -> TransactionValidity { - call.check_obsolete_call() + fn validate(_who: &T::AccountId, call: &T::RuntimeCall) -> ((), TransactionValidity) { + ((), call.check_obsolete_call()) } } +/// Computes priority boost that improved known header by `improved_by` +fn compute_priority_boost( + relayer: &T::AccountId, + improved_by: N, +) -> TransactionPriority +where + T: pallet_bridge_relayers::Config, + N: UniqueSaturatedInto, + Priority: Get, +{ + // we only boost priority if relayer has staked required balance + let is_relayer_registration_active = RelayersPallet::::is_registration_active(relayer); + // if tx improves by just one, there's no need to bump its priority + let improved_by: TransactionPriority = improved_by.unique_saturated_into().saturating_sub(1); + // if relayer is registered, for every skipped header we improve by `Priority` + let boost_per_header = if is_relayer_registration_active { Priority::get() } else { 0 }; + improved_by.saturating_mul(boost_per_header) +} + /// Declares a runtime-specific `BridgeRejectObsoleteHeadersAndMessages` signed extension. /// /// ## Example @@ -92,7 +266,15 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { type AccountId = $account_id; type Call = $call; type AdditionalSigned = (); - type Pre = (); + type Pre = ( + $account_id, + ( $( + <$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::ToPostDispatch, + )* ), + ); fn additional_signed(&self) -> sp_std::result::Result< (), @@ -101,29 +283,72 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { Ok(()) } + #[allow(unused_variables)] fn validate( &self, - _who: &Self::AccountId, + who: &Self::AccountId, call: &Self::Call, _info: &sp_runtime::traits::DispatchInfoOf, _len: usize, ) -> sp_runtime::transaction_validity::TransactionValidity { - let valid = sp_runtime::transaction_validity::ValidTransaction::default(); + let tx_validity = sp_runtime::transaction_validity::ValidTransaction::default(); + let to_prepare = (); $( - let valid = valid - .combine_with(<$filter_call as $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall<$call>>::validate(call)?); + let (from_validate, call_filter_validity) = < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + Self::AccountId, + $call, + >>::validate(&who, call); + let tx_validity = tx_validity.combine_with(call_filter_validity?); )* - Ok(valid) + Ok(tx_validity) } + #[allow(unused_variables)] fn pre_dispatch( self, - who: &Self::AccountId, + relayer: &Self::AccountId, call: &Self::Call, info: &sp_runtime::traits::DispatchInfoOf, len: usize, ) -> Result { - self.validate(who, call, info, len).map(drop) + use tuplex::PushBack; + let to_post_dispatch = (); + $( + let (from_validate, call_filter_validity) = < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::validate(&relayer, call); + let _ = call_filter_validity?; + let to_post_dispatch = to_post_dispatch.push_back(from_validate); + )* + Ok((relayer.clone(), to_post_dispatch)) + } + + #[allow(unused_variables)] + fn post_dispatch( + to_post_dispatch: Option, + info: &sp_runtime::traits::DispatchInfoOf, + post_info: &sp_runtime::traits::PostDispatchInfoOf, + len: usize, + result: &sp_runtime::DispatchResult, + ) -> Result<(), sp_runtime::transaction_validity::TransactionValidityError> { + use tuplex::PopFront; + let Some((relayer, 
to_post_dispatch)) = to_post_dispatch else { return Ok(()) }; + let has_failed = result.is_err(); + $( + let (item, to_post_dispatch) = to_post_dispatch.pop_front(); + < + $filter_call as + $crate::extensions::check_obsolete_extension::BridgeRuntimeFilterCall< + $account_id, + $call, + >>::post_dispatch(&relayer, has_failed, item); + )* + Ok(()) } } }; @@ -132,10 +357,23 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { #[cfg(test)] mod tests { use super::*; + use crate::{ + extensions::refund_relayer_extension::{ + tests::{ + initialize_environment, relayer_account_at_this_chain, + submit_parachain_head_call_ex, submit_relay_header_call_ex, + }, + RefundableParachain, + }, + mock::*, + }; + use bp_polkadot_core::parachains::ParaId; + use bp_runtime::HeaderId; use frame_support::{assert_err, assert_ok}; use sp_runtime::{ - traits::SignedExtension, + traits::{ConstU64, SignedExtension}, transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + DispatchError, }; pub struct MockCall { @@ -143,7 +381,7 @@ mod tests { } impl sp_runtime::traits::Dispatchable for MockCall { - type RuntimeOrigin = (); + type RuntimeOrigin = u64; type Config = (); type Info = (); type PostInfo = (); @@ -156,50 +394,287 @@ mod tests { } } - struct FirstFilterCall; - impl BridgeRuntimeFilterCall for FirstFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { + pub struct FirstFilterCall; + impl FirstFilterCall { + fn post_dispatch_called_with(success: bool) { + frame_support::storage::unhashed::put(&[1], &success); + } + + fn verify_post_dispatch_called_with(success: bool) { + assert_eq!(frame_support::storage::unhashed::get::(&[1]), Some(success)); + } + } + + impl BridgeRuntimeFilterCall for FirstFilterCall { + type ToPostDispatch = u64; + fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) { if call.data <= 1 { - return InvalidTransaction::Custom(1).into() + return (1, InvalidTransaction::Custom(1).into()) } - Ok(ValidTransaction { priority: 1, ..Default::default() }) + (1, Ok(ValidTransaction { priority: 1, ..Default::default() })) + } + + fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) { + Self::post_dispatch_called_with(!has_failed); + assert_eq!(to_post_dispatch, 1); + } + } + + pub struct SecondFilterCall; + + impl SecondFilterCall { + fn post_dispatch_called_with(success: bool) { + frame_support::storage::unhashed::put(&[2], &success); + } + + fn verify_post_dispatch_called_with(success: bool) { + assert_eq!(frame_support::storage::unhashed::get::(&[2]), Some(success)); } } - struct SecondFilterCall; - impl BridgeRuntimeFilterCall for SecondFilterCall { - fn validate(call: &MockCall) -> TransactionValidity { + impl BridgeRuntimeFilterCall for SecondFilterCall { + type ToPostDispatch = u64; + fn validate(_who: &u64, call: &MockCall) -> (u64, TransactionValidity) { if call.data <= 2 { - return InvalidTransaction::Custom(2).into() + return (2, InvalidTransaction::Custom(2).into()) } - Ok(ValidTransaction { priority: 2, ..Default::default() }) + (2, Ok(ValidTransaction { priority: 2, ..Default::default() })) + } + + fn post_dispatch(_who: &u64, has_failed: bool, to_post_dispatch: Self::ToPostDispatch) { + Self::post_dispatch_called_with(!has_failed); + assert_eq!(to_post_dispatch, 2); } } #[test] - fn test() { + fn test_generated_obsolete_extension() { generate_bridge_reject_obsolete_headers_and_messages!( MockCall, - (), + u64, FirstFilterCall, SecondFilterCall ); - assert_err!( - 
BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 1 }, &(), 0), - InvalidTransaction::Custom(1) - ); + run_test(|| { + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 1 }, &(), 0), + InvalidTransaction::Custom(1) + ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( + &42, + &MockCall { data: 1 }, + &(), + 0 + ), + InvalidTransaction::Custom(1) + ); - assert_err!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 2 }, &(), 0), - InvalidTransaction::Custom(2) - ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.validate(&42, &MockCall { data: 2 }, &(), 0), + InvalidTransaction::Custom(2) + ); + assert_err!( + BridgeRejectObsoleteHeadersAndMessages.pre_dispatch( + &42, + &MockCall { data: 2 }, + &(), + 0 + ), + InvalidTransaction::Custom(2) + ); - assert_ok!( - BridgeRejectObsoleteHeadersAndMessages.validate(&(), &MockCall { data: 3 }, &(), 0), - ValidTransaction { priority: 3, ..Default::default() } - ) + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .validate(&42, &MockCall { data: 3 }, &(), 0) + .unwrap(), + ValidTransaction { priority: 3, ..Default::default() }, + ); + assert_eq!( + BridgeRejectObsoleteHeadersAndMessages + .pre_dispatch(&42, &MockCall { data: 3 }, &(), 0) + .unwrap(), + (42, (1, 2)), + ); + + // when post_dispatch is called with `Ok(())`, it is propagated to all "nested" + // extensions + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + Some((0, (1, 2))), + &(), + &(), + 0, + &Ok(()) + )); + FirstFilterCall::verify_post_dispatch_called_with(true); + SecondFilterCall::verify_post_dispatch_called_with(true); + + // when post_dispatch is called with `Err(())`, it is propagated to all "nested" + // extensions + assert_ok!(BridgeRejectObsoleteHeadersAndMessages::post_dispatch( + Some((0, (1, 2))), + &(), + &(), + 0, + &Err(DispatchError::BadOrigin) + )); + FirstFilterCall::verify_post_dispatch_called_with(false); + SecondFilterCall::verify_post_dispatch_called_with(false); + }); + } + + frame_support::parameter_types! 
{ + pub SlashDestination: ThisChainAccountId = 42; + } + + type BridgeGrandpaWrapper = + CheckAndBoostBridgeGrandpaTransactions, SlashDestination>; + + #[test] + fn grandpa_wrapper_does_not_boost_extensions_for_unregistered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + + let priority_boost = BridgeGrandpaWrapper::validate( + &relayer_account_at_this_chain(), + &submit_relay_header_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 0); + }) + } + + #[test] + fn grandpa_wrapper_boosts_extensions_for_registered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + let priority_boost = BridgeGrandpaWrapper::validate( + &relayer_account_at_this_chain(), + &submit_relay_header_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 99_000); + }) + } + + #[test] + fn grandpa_wrapper_slashes_registered_relayer_if_transaction_fails() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), true, Some(150)); + assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + #[test] + fn grandpa_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeGrandpaWrapper::post_dispatch(&relayer_account_at_this_chain(), false, Some(100)); + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + type BridgeParachainsWrapper = CheckAndBoostBridgeParachainsTransactions< + TestRuntime, + RefundableParachain<(), BridgedUnderlyingParachain>, + ConstU64<1_000>, + SlashDestination, + >; + + #[test] + fn parachains_wrapper_does_not_boost_extensions_for_unregistered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + + let priority_boost = BridgeParachainsWrapper::validate( + &relayer_account_at_this_chain(), + &submit_parachain_head_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 0); + }) + } + + #[test] + fn parachains_wrapper_boosts_extensions_for_registered_relayer() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + let priority_boost = BridgeParachainsWrapper::validate( + &relayer_account_at_this_chain(), + &submit_parachain_head_call_ex(200), + ) + .1 + .unwrap() + .priority; + assert_eq!(priority_boost, 99_000); + }) + } + + #[test] + fn parachains_wrapper_slashes_registered_relayer_if_transaction_fails() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeParachainsWrapper::post_dispatch( + &relayer_account_at_this_chain(), + true, + Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(150, Default::default()), + para_id: 
ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + para_head_hash: [150u8; 32].into(), + is_free_execution_expected: false, + }), + ); + assert!(!BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) + } + + #[test] + fn parachains_wrapper_does_not_slash_registered_relayer_if_transaction_succeeds() { + run_test(|| { + initialize_environment(100, 100, 100); + BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) + .unwrap(); + + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + BridgeParachainsWrapper::post_dispatch( + &relayer_account_at_this_chain(), + false, + Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(100, Default::default()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + para_head_hash: [100u8; 32].into(), + is_free_execution_expected: false, + }), + ); + assert!(BridgeRelayers::is_registration_active(&relayer_account_at_this_chain())); + }) } } diff --git a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs index 5035553f508d..92810290f95e 100644 --- a/bridges/bin/runtime-common/src/extensions/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/extensions/priority_calculator.rs @@ -22,7 +22,6 @@ //! single message with nonce `N`, then the transaction with nonces `N..=N+100` will //! be rejected. This can lower bridge throughput down to one message per block. -use bp_messages::MessageNonce; use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; @@ -30,16 +29,19 @@ use sp_runtime::transaction_validity::TransactionPriority; #[allow(unused_imports)] pub use integrity_tests::*; -/// Compute priority boost for message delivery transaction that delivers -/// given number of messages. -pub fn compute_priority_boost( - messages: MessageNonce, -) -> TransactionPriority +/// We'll deal with different bridge items here - messages, headers, ... +/// To avoid being too verbose with generic code, let's just define a separate alias. +pub type ItemCount = u64; + +/// Compute priority boost for transaction that brings given number of bridge +/// items (messages, headers, ...), when every additional item adds `PriorityBoostPerItem` +/// to transaction priority. 
+pub fn compute_priority_boost<PriorityBoostPerItem>(n_items: ItemCount) -> TransactionPriority
 where
-	PriorityBoostPerMessage: Get<TransactionPriority>,
+	PriorityBoostPerItem: Get<TransactionPriority>,
 {
-	// we don't want any boost for transaction with single message => minus one
-	PriorityBoostPerMessage::get().saturating_mul(messages.saturating_sub(1))
+	// we don't want any boost for a transaction with a single (additional) item => minus one
+	PriorityBoostPerItem::get().saturating_mul(n_items.saturating_sub(1))
 }
 
 #[cfg(not(feature = "integrity-test"))]
@@ -47,7 +49,8 @@ mod integrity_tests {}
 
 #[cfg(feature = "integrity-test")]
 mod integrity_tests {
-	use super::compute_priority_boost;
+	use super::{compute_priority_boost, ItemCount};
+	use crate::extensions::refund_relayer_extension::RefundableParachainId;
 
 	use bp_messages::MessageNonce;
 	use bp_runtime::PreComputedSize;
@@ -55,7 +58,6 @@ mod integrity_tests {
 		dispatch::{DispatchClass, DispatchInfo, Pays, PostDispatchInfo},
 		traits::Get,
 	};
-	use pallet_bridge_messages::WeightInfoExt;
 	use pallet_transaction_payment::OnChargeTransaction;
 	use sp_runtime::{
 		traits::{Dispatchable, UniqueSaturatedInto, Zero},
@@ -68,37 +70,33 @@ mod integrity_tests {
 		T,
 	>>::Balance;
 
-	/// Ensures that the value of `PriorityBoostPerMessage` matches the value of
-	/// `tip_boost_per_message`.
+	/// Ensures that the value of `PriorityBoostPerItem` matches the value of
+	/// `tip_boost_per_item`.
 	///
-	/// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have almost
-	/// the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want to be sure
-	/// that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the priority will be close
+	/// We want two transactions, `TX1` with `N` items and `TX2` with `N+1` items, to have almost
+	/// the same priority if we add a `tip_boost_per_item` tip to `TX1`. We want to be sure
+	/// that if we add a plain `PriorityBoostPerItem` priority boost to `TX1`, its priority will be close
 	/// to `TX2` as well.
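For intuition, a standalone sketch of the boost rule being checked here, with the `Get` bound replaced by a plain argument. The `1_000` per-item value mirrors the `ConstU64<1_000>` used by the wrapper tests earlier in this patch, so the `99_000` result matches their assertions:

```rust
type TransactionPriority = u64;
type ItemCount = u64;

/// The "minus one" rule: the first item earns no boost, every additional
/// item adds `priority_boost_per_item` to the transaction priority.
fn compute_priority_boost(
	priority_boost_per_item: TransactionPriority,
	n_items: ItemCount,
) -> TransactionPriority {
	priority_boost_per_item.saturating_mul(n_items.saturating_sub(1))
}

fn main() {
	// a single-item transaction gets no boost at all
	assert_eq!(compute_priority_boost(1_000, 1), 0);
	// 100 items => 99 additional items => the `99_000` asserted by the tests above
	assert_eq!(compute_priority_boost(1_000, 100), 99_000);
}
```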
- pub fn ensure_priority_boost_is_sane( - tip_boost_per_message: BalanceOf, + fn ensure_priority_boost_is_sane( + param_name: &str, + max_items: ItemCount, + tip_boost_per_item: Balance, + estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority, ) where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - PriorityBoostPerMessage: Get, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, + PriorityBoostPerItem: Get, + ItemCount: UniqueSaturatedInto, + Balance: FixedPointOperand + Zero, { - let priority_boost_per_message = PriorityBoostPerMessage::get(); - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); - for messages in 1..=maximal_messages_in_delivery_transaction { - let base_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(messages, Zero::zero()); - let priority_boost = compute_priority_boost::(messages); - let priority_with_boost = base_priority + priority_boost; - - let tip = tip_boost_per_message.saturating_mul((messages - 1).unique_saturated_into()); - let priority_with_tip = - estimate_message_delivery_transaction_priority::(1, tip); + let priority_boost_per_item = PriorityBoostPerItem::get(); + for n_items in 1..=max_items { + let base_priority = estimate_priority(n_items, Zero::zero()); + let priority_boost = compute_priority_boost::(n_items); + let priority_with_boost = base_priority + .checked_add(priority_boost) + .expect("priority overflow: try lowering `max_items` or `tip_boost_per_item`?"); + + let tip = tip_boost_per_item.saturating_mul((n_items - 1).unique_saturated_into()); + let priority_with_tip = estimate_priority(1, tip); const ERROR_MARGIN: TransactionPriority = 5; // 5% if priority_with_boost.abs_diff(priority_with_tip).saturating_mul(100) / @@ -106,97 +104,304 @@ mod integrity_tests { ERROR_MARGIN { panic!( - "The PriorityBoostPerMessage value ({}) must be fixed to: {}", - priority_boost_per_message, - compute_priority_boost_per_message::( - tip_boost_per_message + "The {param_name} value ({}) must be fixed to: {}", + priority_boost_per_item, + compute_priority_boost_per_item( + max_items, + tip_boost_per_item, + estimate_priority ), ); } } } - /// Compute priority boost that we give to message delivery transaction for additional message. + /// Compute priority boost that we give to bridge transaction for every + /// additional bridge item. 
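To make the relationship between the sanity check and the suggested value concrete, here is a self-contained model with a made-up linear `estimate_priority`. Only the two formulas are taken from the code; all numbers are illustrative:

```rust
type Priority = u64;
type Balance = u64;
type ItemCount = u64;

/// A hypothetical priority model: a base value, plus a per-item weight/size
/// component, plus two priority units per tip token.
fn estimate_priority(n_items: ItemCount, tip: Balance) -> Priority {
	10_000 + 10 * n_items + 2 * tip
}

/// The suggestion formula from the panic message above: the per-item boost that
/// makes a max-size zero-tip transaction as attractive as a one-item max-tip one.
fn compute_priority_boost_per_item(max_items: ItemCount, tip_boost_per_item: Balance) -> Priority {
	let small_with_tip = estimate_priority(1, tip_boost_per_item * max_items);
	let large_without_tip = estimate_priority(max_items, 0);
	small_with_tip.saturating_sub(large_without_tip) / (max_items - 1)
}

fn main() {
	let (max_items, tip_boost_per_item) = (1_024u64, 500u64);
	let boost_per_item = compute_priority_boost_per_item(max_items, tip_boost_per_item);

	// the 5% check from `ensure_priority_boost_is_sane`, for every item count
	for n_items in 1..=max_items {
		let priority_with_boost = estimate_priority(n_items, 0) + boost_per_item * (n_items - 1);
		let priority_with_tip = estimate_priority(1, tip_boost_per_item * (n_items - 1));
		let diff = priority_with_boost.abs_diff(priority_with_tip);
		assert!(diff * 100 / priority_with_tip < 5);
	}
}
```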
#[cfg(feature = "integrity-test")] - fn compute_priority_boost_per_message( - tip_boost_per_message: BalanceOf, + fn compute_priority_boost_per_item( + max_items: ItemCount, + tip_boost_per_item: Balance, + estimate_priority: impl Fn(ItemCount, Balance) -> TransactionPriority, ) -> TransactionPriority where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, + ItemCount: UniqueSaturatedInto, + Balance: FixedPointOperand + Zero, { - // estimate priority of transaction that delivers one message and has large tip - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + // estimate priority of transaction that delivers one item and has large tip let small_with_tip_priority = - estimate_message_delivery_transaction_priority::( - 1, - tip_boost_per_message - .saturating_mul(maximal_messages_in_delivery_transaction.saturated_into()), - ); - // estimate priority of transaction that delivers maximal number of messages, but has no tip - let large_without_tip_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(maximal_messages_in_delivery_transaction, Zero::zero()); + estimate_priority(1, tip_boost_per_item.saturating_mul(max_items.saturated_into())); + // estimate priority of transaction that delivers maximal number of items, but has no tip + let large_without_tip_priority = estimate_priority(max_items, Zero::zero()); small_with_tip_priority .saturating_sub(large_without_tip_priority) - .saturating_div(maximal_messages_in_delivery_transaction - 1) + .saturating_div(max_items - 1) } - /// Estimate message delivery transaction priority. - #[cfg(feature = "integrity-test")] - fn estimate_message_delivery_transaction_priority( - messages: MessageNonce, - tip: BalanceOf, - ) -> TransactionPriority - where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, - { - // just an estimation of extra transaction bytes that are added to every transaction - // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments except the proof itself) - let base_tx_size = 512; - // let's say we are relaying similar small messages and for every message we add more trie - // nodes to the proof (x0.5 because we expect some nodes to be reused) - let estimated_message_size = 512; - // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = - Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); - // messages proof argument size is (for every message) messages size + some additional - // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of default - // "overhead" constant - let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() - .saturating_mul(2) - .saturating_div(3) - .saturating_add(estimated_message_size) - .saturating_mul(messages as _); - - // finally we are able to estimate transaction size and weight - let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( - &PreComputedSize(transaction_size as _), - messages as _, - estimated_message_dispatch_weight.saturating_mul(messages), - ); - - pallet_transaction_payment::ChargeTransactionPayment::::get_priority( - &DispatchInfo { - weight: transaction_weight, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }, - transaction_size as _, - tip, - Zero::zero(), - ) + /// Computations, specific to bridge relay chains transactions. + pub mod per_relay_header { + use super::*; + + use bp_header_chain::{ + max_expected_submit_finality_proof_arguments_size, ChainWithGrandpa, + }; + use pallet_bridge_grandpa::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerHeader` matches the value of + /// `tip_boost_per_header`. + /// + /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have + /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority + /// will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_header: BalanceOf, + ) where + Runtime: + pallet_transaction_payment::Config + pallet_bridge_grandpa::Config, + GrandpaInstance: 'static, + PriorityBoostPerHeader: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // the meaning of `max_items` here is different when comparing with message + // transactions - with messages we have a strict limit on maximal number of + // messages we can fit into a single transaction. With headers, current best + // header may be improved by any "number of items". But this number is only + // used to verify priority boost, so it should be fine to select this arbitrary + // value - it SHALL NOT affect any value, it just adds more tests for the value. + let maximal_improved_by = 4_096; + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerRelayHeader", + maximal_improved_by, + tip_boost_per_header, + |_n_headers, tip| { + estimate_relay_header_submit_transaction_priority::( + tip, + ) + }, + ); + } + + /// Estimate relay header delivery transaction priority. 
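A quick numeric aside on the estimation that follows: `MAX_AUTHORITIES_COUNT * 2 / 3 + 1` is the smallest GRANDPA supermajority, and the call size is that worst-case justification plus a fixed per-transaction overhead. A sketch with made-up constants (the real values come from `ChainWithGrandpa` and `max_expected_submit_finality_proof_arguments_size`):

```rust
fn main() {
	// smallest number of precommits that can finalize a header under GRANDPA
	const MAX_AUTHORITIES_COUNT: u32 = 100_000; // hypothetical bound
	let precommits = MAX_AUTHORITIES_COUNT * 2 / 3 + 1;
	assert_eq!(precommits, 66_667);

	// assembled the same way as in the estimation function below
	let base_tx_size: u32 = 512; // signature, extension data, non-proof call args
	let tx_call_size: u32 = 1_048_576; // stand-in for the worst-case proof arguments
	let transaction_size = base_tx_size.saturating_add(tx_call_size);
	assert_eq!(transaction_size, 1_049_088);
}
```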
+ #[cfg(feature = "integrity-test")] + fn estimate_relay_header_submit_transaction_priority( + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: + pallet_transaction_payment::Config + pallet_bridge_grandpa::Config, + GrandpaInstance: 'static, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying largest relay chain headers + let tx_call_size = max_expected_submit_finality_proof_arguments_size::< + Runtime::BridgedChain, + >(true, Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(tx_call_size); + let transaction_weight = Runtime::WeightInfo::submit_finality_proof_weight( + Runtime::BridgedChain::MAX_AUTHORITIES_COUNT * 2 / 3 + 1, + Runtime::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY, + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } + } + + /// Computations, specific to bridge parachains transactions. + pub mod per_parachain_header { + use super::*; + + use bp_runtime::Parachain; + use pallet_bridge_parachains::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerHeader` matches the value of + /// `tip_boost_per_header`. + /// + /// We want two transactions, `TX1` with `N` headers and `TX2` with `N+1` headers, have + /// almost the same priority if we'll add `tip_boost_per_header` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerHeader` priority to `TX1`, the priority + /// will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_header: BalanceOf, + ) where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_parachains::Config, + RefundableParachain: RefundableParachainId, + PriorityBoostPerHeader: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // the meaning of `max_items` here is different when comparing with message + // transactions - with messages we have a strict limit on maximal number of + // messages we can fit into a single transaction. With headers, current best + // header may be improved by any "number of items". But this number is only + // used to verify priority boost, so it should be fine to select this arbitrary + // value - it SHALL NOT affect any value, it just adds more tests for the value. + let maximal_improved_by = 4_096; + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerParachainHeader", + maximal_improved_by, + tip_boost_per_header, + |_n_headers, tip| { + estimate_parachain_header_submit_transaction_priority::< + Runtime, + RefundableParachain, + >(tip) + }, + ); + } + + /// Estimate parachain header delivery transaction priority. 
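The size formula in the estimation below is simply `base_tx_size + expected_extra_storage_proof_size + MAX_HEADER_SIZE`. A minimal worked sketch, where the proof overhead is a made-up stand-in for the weights constant and `4_096` mirrors the `MAX_BRIDGE_HUB_HEADER_SIZE` constant added later in this patch:

```rust
fn main() {
	let base_tx_size: u32 = 512; // signature, extension data, non-proof call args
	let expected_extra_storage_proof_size: u32 = 1_024; // hypothetical weights constant
	let max_header_size: u32 = 4_096; // e.g. `MAX_BRIDGE_HUB_HEADER_SIZE`

	let tx_call_size = expected_extra_storage_proof_size.saturating_add(max_header_size);
	let transaction_size = base_tx_size.saturating_add(tx_call_size);
	assert_eq!(transaction_size, 5_632);
}
```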
+ #[cfg(feature = "integrity-test")] + fn estimate_parachain_header_submit_transaction_priority( + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_parachains::Config, + RefundableParachain: RefundableParachainId, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying largest parachain headers and proof takes some more bytes + let tx_call_size = >::WeightInfo::expected_extra_storage_proof_size() + .saturating_add(RefundableParachain::BridgedChain::MAX_HEADER_SIZE); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(tx_call_size); + let transaction_weight = >::WeightInfo::submit_parachain_heads_weight( + Runtime::DbWeight::get(), + &PreComputedSize(transaction_size as _), + // just one parachain - all other submissions won't receive any boost + 1, + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } + } + + /// Computations, specific to bridge messages transactions. + pub mod per_message { + use super::*; + + use pallet_bridge_messages::WeightInfoExt; + + /// Ensures that the value of `PriorityBoostPerMessage` matches the value of + /// `tip_boost_per_message`. + /// + /// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have + /// almost the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want + /// to be sure that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the + /// priority will be close to `TX2` as well. + pub fn ensure_priority_boost_is_sane( + tip_boost_per_message: BalanceOf, + ) where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_messages::Config, + MessagesInstance: 'static, + PriorityBoostPerMessage: Get, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + let maximal_messages_in_delivery_transaction = + Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); + super::ensure_priority_boost_is_sane::>( + "PriorityBoostPerMessage", + maximal_messages_in_delivery_transaction, + tip_boost_per_message, + |n_messages, tip| { + estimate_message_delivery_transaction_priority::( + n_messages, tip, + ) + }, + ); + } + + /// Estimate message delivery transaction priority. 
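The message variant grows with every bundled message: each message contributes its own bytes plus two thirds of the "extra storage proof" constant, because trie nodes are partially shared between messages. A standalone sketch of that size formula, with illustrative constants:

```rust
fn main() {
	let base_tx_size: u64 = 512;
	let estimated_message_size: u64 = 512;
	let expected_extra_storage_proof_size: u64 = 1_024; // hypothetical weights constant
	let messages: u64 = 100;

	// per message: 2/3 of the proof overhead (nodes are partially reused) + the message
	let messages_proof_size = expected_extra_storage_proof_size
		.saturating_mul(2)
		.saturating_div(3)
		.saturating_add(estimated_message_size)
		.saturating_mul(messages);
	assert_eq!(messages_proof_size, (682 + 512) * 100);

	let transaction_size = base_tx_size.saturating_add(messages_proof_size);
	assert_eq!(transaction_size, 119_912);
}
```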
+ #[cfg(feature = "integrity-test")] + fn estimate_message_delivery_transaction_priority( + messages: MessageNonce, + tip: BalanceOf, + ) -> TransactionPriority + where + Runtime: pallet_transaction_payment::Config + + pallet_bridge_messages::Config, + MessagesInstance: 'static, + Runtime::RuntimeCall: Dispatchable, + BalanceOf: Send + Sync + FixedPointOperand, + { + // just an estimation of extra transaction bytes that are added to every transaction + // (including signature, signed extensions extra and etc + in our case it includes + // all call arguments except the proof itself) + let base_tx_size = 512; + // let's say we are relaying similar small messages and for every message we add more + // trie nodes to the proof (x0.5 because we expect some nodes to be reused) + let estimated_message_size = 512; + // let's say all our messages have the same dispatch weight + let estimated_message_dispatch_weight = + Runtime::WeightInfo::message_dispatch_weight(estimated_message_size); + // messages proof argument size is (for every message) messages size + some additional + // trie nodes. Some of them are reused by different messages, so let's take 2/3 of + // default "overhead" constant + let messages_proof_size = Runtime::WeightInfo::expected_extra_storage_proof_size() + .saturating_mul(2) + .saturating_div(3) + .saturating_add(estimated_message_size) + .saturating_mul(messages as _); + + // finally we are able to estimate transaction size and weight + let transaction_size = base_tx_size.saturating_add(messages_proof_size); + let transaction_weight = Runtime::WeightInfo::receive_messages_proof_weight( + &PreComputedSize(transaction_size as _), + messages as _, + estimated_message_dispatch_weight.saturating_mul(messages), + ); + + pallet_transaction_payment::ChargeTransactionPayment::::get_priority( + &DispatchInfo { + weight: transaction_weight, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }, + transaction_size as _, + tip, + Zero::zero(), + ) + } } } diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index 64ae1d0b669f..5aa7f1c095d5 100644 --- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -24,7 +24,7 @@ use crate::messages_call_ext::{ }; use bp_messages::{LaneId, MessageNonce}; use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{Chain, Parachain, ParachainIdOf, RangeInclusiveExt, StaticStrProvider}; +use bp_runtime::{Parachain, RangeInclusiveExt, StaticStrProvider}; use codec::{Codec, Decode, Encode}; use frame_support::{ dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo}, @@ -33,8 +33,7 @@ use frame_support::{ CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use pallet_bridge_grandpa::{ - CallSubType as GrandpaCallSubType, Config as GrandpaConfig, SubmitFinalityProofHelper, - SubmitFinalityProofInfo, + CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo, }; use pallet_bridge_messages::Config as MessagesConfig; use pallet_bridge_parachains::{ @@ -66,20 +65,9 @@ type CallOf = ::RuntimeCall; /// coming from this parachain. pub trait RefundableParachainId { /// The instance of the bridge parachains pallet. - type Instance; + type Instance: 'static; /// The parachain Id. - type Id: Get; -} - -/// Default implementation of `RefundableParachainId`. 
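With the default implementation removed just below, `RefundableParachain` is the remaining way to name a refundable parachain, and the para id is now derived from the bridged chain type instead of a separate `Get<u32>` parameter. A compressed, self-contained sketch of the reworked trait (the `Parachain` trait and `TestPara` chain are inlined stand-ins for the real `bp_runtime` items):

```rust
use core::marker::PhantomData;

/// Stand-in for `bp_runtime::Parachain`.
pub trait Parachain {
	const PARACHAIN_ID: u32;
}

/// The reworked trait: the id comes from the bridged chain type.
pub trait RefundableParachainId {
	type Instance: 'static;
	type BridgedChain: Parachain;
}

pub struct RefundableParachain<Instance, Para>(PhantomData<(Instance, Para)>);

impl<Instance: 'static, Para: Parachain> RefundableParachainId
	for RefundableParachain<Instance, Para>
{
	type Instance = Instance;
	type BridgedChain = Para;
}

/// A hypothetical bridged parachain, mirroring the mock's id.
pub struct TestPara;
impl Parachain for TestPara {
	const PARACHAIN_ID: u32 = 42;
}

fn para_id_of<R: RefundableParachainId>() -> u32 {
	<R::BridgedChain as Parachain>::PARACHAIN_ID
}

fn main() {
	// callers now read the id off the bridged chain, as the extension code does
	assert_eq!(para_id_of::<RefundableParachain<(), TestPara>>(), 42);
}
```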
-pub struct DefaultRefundableParachainId(PhantomData<(Instance, Id)>); - -impl RefundableParachainId for DefaultRefundableParachainId -where - Id: Get, -{ - type Instance = Instance; - type Id = Id; + type BridgedChain: Parachain; } /// Implementation of `RefundableParachainId` for `trait Parachain`. @@ -87,10 +75,11 @@ pub struct RefundableParachain(PhantomData<(Instance, Para)>); impl RefundableParachainId for RefundableParachain where + Instance: 'static, Para: Parachain, { type Instance = Instance; - type Id = ParachainIdOf; + type BridgedChain = Para; } /// Trait identifying a bridged messages lane. A relayer might be refunded for delivering messages @@ -242,17 +231,10 @@ pub enum RelayerAccountAction { /// Everything common among our refund signed extensions. pub trait RefundSignedExtension: 'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo -where - >::BridgedChain: - Chain, { /// This chain runtime. - type Runtime: UtilityConfig> - + GrandpaConfig - + MessagesConfig<::Instance> + type Runtime: MessagesConfig<::Instance> + RelayersConfig; - /// Grandpa pallet reference. - type GrandpaInstance: 'static; /// Messages pallet and lane reference. type Msgs: RefundableMessagesLaneId; /// Refund amount calculator. @@ -276,11 +258,13 @@ where call: &CallOf, ) -> Result<&CallOf, TransactionValidityError>; - /// Called from post-dispatch and shall perform additional checks (apart from relay - /// chain finality and messages transaction finality) of given call result. + /// Called from post-dispatch and shall perform additional checks (apart from messages + /// transaction success) of given call result. fn additional_call_result_check( relayer: &AccountIdOf, call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, ) -> bool; /// Given post-dispatch information, analyze the outcome of relayer call and return @@ -348,35 +332,6 @@ where return slash_relayer_if_delivery_result } - // check if relay chain state has been updated - if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { - if !SubmitFinalityProofHelper::::was_successful( - finality_proof_info.block_number, - ) { - // we only refund relayer if all calls have updated chain state - log::trace!( - target: "runtime::bridge", - "{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof", - Self::Id::STR, - ::Id::get(), - relayer, - ); - return slash_relayer_if_delivery_result - } - - // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll` - // transaction. If relay chain header is mandatory, the GRANDPA pallet returns - // `Pays::No`, because such transaction is mandatory for operating the bridge. But - // `utility.batchAll` transaction always requires payment. But in both cases we'll - // refund relayer - either explicitly here, or using `Pays::No` if he's choosing - // to submit dedicated transaction. - - // submitter has means to include extra weight/bytes in the `submit_finality_proof` - // call, so let's subtract extra weight/size to avoid refunding for this extra stuff - extra_weight = finality_proof_info.extra_weight; - extra_size = finality_proof_info.extra_size; - } - // Check if the `ReceiveMessagesProof` call delivered at least some of the messages that // it contained. If this happens, we consider the transaction "helpful" and refund it. 
let msgs_call_info = call_info.messages_call_info(); @@ -391,8 +346,13 @@ where return slash_relayer_if_delivery_result } - // do additional check - if !Self::additional_call_result_check(&relayer, &call_info) { + // do additional checks + if !Self::additional_call_result_check( + &relayer, + &call_info, + &mut extra_weight, + &mut extra_size, + ) { return slash_relayer_if_delivery_result } @@ -468,18 +428,11 @@ where RuntimeDebugNoBound, TypeInfo, )] -pub struct RefundSignedExtensionAdapter(T) -where - >::BridgedChain: - Chain; +pub struct RefundSignedExtensionAdapter(T); impl SignedExtension for RefundSignedExtensionAdapter where - >::BridgedChain: - Chain, CallOf: Dispatchable - + IsSubType, T::Runtime>> - + GrandpaCallSubType + MessagesCallSubType::Instance>, { const IDENTIFIER: &'static str = T::Id::STR; @@ -644,6 +597,14 @@ impl RefundSignedExtension for RefundBridgedParachainMessages where Self: 'static + Send + Sync, + RefundBridgedGrandpaMessages< + Runtime, + Runtime::BridgesGrandpaPalletInstance, + Msgs, + Refund, + Priority, + Id, + >: 'static + Send + Sync, Runtime: UtilityConfig> + BoundedBridgeGrandpaConfig + ParachainsConfig @@ -661,7 +622,6 @@ where + MessagesCallSubType, { type Runtime = Runtime; - type GrandpaInstance = Runtime::BridgesGrandpaPalletInstance; type Msgs = Msgs; type Refund = Refund; type Priority = Priority; @@ -687,7 +647,7 @@ where let para_finality_call = calls .next() .transpose()? - .and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get())); + .and_then(|c| c.submit_parachain_heads_info_for(Para::BridgedChain::PARACHAIN_ID)); let relay_finality_call = calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info()); @@ -711,7 +671,26 @@ where Ok(call) } - fn additional_call_result_check(relayer: &Runtime::AccountId, call_info: &CallInfo) -> bool { + fn additional_call_result_check( + relayer: &Runtime::AccountId, + call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, + ) -> bool { + // check if relay chain state has been updated + let is_grandpa_call_successful = + RefundBridgedGrandpaMessages::< + Runtime, + Runtime::BridgesGrandpaPalletInstance, + Msgs, + Refund, + Priority, + Id, + >::additional_call_result_check(relayer, call_info, extra_weight, extra_size); + if !is_grandpa_call_successful { + return false + } + // check if parachain state has been updated if let Some(para_proof_info) = call_info.submit_parachain_heads_info() { if !SubmitParachainHeadsHelper::::was_successful( @@ -722,7 +701,7 @@ where target: "runtime::bridge", "{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof", Id::STR, - Para::Id::get(), + Para::BridgedChain::PARACHAIN_ID, Msgs::Id::get(), relayer, ); @@ -794,7 +773,6 @@ where + MessagesCallSubType, { type Runtime = Runtime; - type GrandpaInstance = GrandpaInstance; type Msgs = Msgs; type Refund = Refund; type Priority = Priority; @@ -836,13 +814,125 @@ where Ok(call) } - fn additional_call_result_check(_relayer: &Runtime::AccountId, _call_info: &CallInfo) -> bool { + fn additional_call_result_check( + relayer: &Runtime::AccountId, + call_info: &CallInfo, + extra_weight: &mut Weight, + extra_size: &mut u32, + ) -> bool { + // check if relay chain state has been updated + if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { + if !SubmitFinalityProofHelper::::was_successful( + finality_proof_info.block_number, + ) { + // we only refund relayer if all calls have updated chain state + log::trace!( + target: "runtime::bridge", + "{} 
via {:?}: relayer {:?} has submitted invalid relay chain finality proof", + Self::Id::STR, + ::Id::get(), + relayer, + ); + return false + } + + // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll` + // transaction. If relay chain header is mandatory, the GRANDPA pallet returns + // `Pays::No`, because such transaction is mandatory for operating the bridge. But + // `utility.batchAll` transaction always requires payment. But in both cases we'll + // refund relayer - either explicitly here, or using `Pays::No` if he's choosing + // to submit dedicated transaction. + + // submitter has means to include extra weight/bytes in the `submit_finality_proof` + // call, so let's subtract extra weight/size to avoid refunding for this extra stuff + *extra_weight = (*extra_weight).saturating_add(finality_proof_info.extra_weight); + *extra_size = (*extra_size).saturating_add(finality_proof_info.extra_size); + } + + true + } +} + +/// Transaction extension that refunds a relayer for standalone messages delivery and confirmation +/// transactions. Finality transactions are not refunded. +#[derive( + DefaultNoBound, + CloneNoBound, + Decode, + Encode, + EqNoBound, + PartialEqNoBound, + RuntimeDebugNoBound, + TypeInfo, +)] +#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))] +pub struct RefundBridgedMessages( + PhantomData<( + // runtime with `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed + Runtime, + // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of + // the used `pallet-bridge-messages` pallet and the lane within this pallet + Msgs, + // implementation of the `RefundCalculator` trait, that is used to compute refund that + // we give to relayer for his transaction + Refund, + // getter for per-message `TransactionPriority` boost that we give to message + // delivery transactions + Priority, + // the runtime-unique identifier of this signed extension + Id, + )>, +); + +impl RefundSignedExtension + for RefundBridgedMessages +where + Self: 'static + Send + Sync, + Runtime: MessagesConfig + RelayersConfig, + Msgs: RefundableMessagesLaneId, + Refund: RefundCalculator, + Priority: Get, + Id: StaticStrProvider, + CallOf: Dispatchable + + MessagesCallSubType, +{ + type Runtime = Runtime; + type Msgs = Msgs; + type Refund = Refund; + type Priority = Priority; + type Id = Id; + + fn expand_call(call: &CallOf) -> Vec<&CallOf> { + vec![call] + } + + fn parse_and_check_for_obsolete_call( + call: &CallOf, + ) -> Result, TransactionValidityError> { + let call = Self::check_obsolete_parsed_call(call)?; + Ok(call.call_info_for(Msgs::Id::get()).map(CallInfo::Msgs)) + } + + fn check_obsolete_parsed_call( + call: &CallOf, + ) -> Result<&CallOf, TransactionValidityError> { + call.check_obsolete_call()?; + Ok(call) + } + + fn additional_call_result_check( + _relayer: &Runtime::AccountId, + _call_info: &CallInfo, + _extra_weight: &mut Weight, + _extra_size: &mut u32, + ) -> bool { + // everything is checked by the `RefundTransactionExtension` true } } #[cfg(test)] -mod tests { +pub(crate) mod tests { use super::*; use crate::{ messages::{ @@ -854,6 +944,7 @@ mod tests { }, mock::*, }; + use bp_header_chain::StoredHeaderDataBuilder; use bp_messages::{ DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, @@ -879,7 +970,6 @@ mod tests { }; parameter_types! 
{ - TestParachain: u32 = 1000; pub TestLaneId: LaneId = TEST_LANE_ID; pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( TEST_LANE_ID, @@ -895,6 +985,14 @@ mod tests { bp_runtime::generate_static_str_provider!(TestExtension); + type TestMessagesExtensionProvider = RefundBridgedMessages< + TestRuntime, + RefundableMessagesLane<(), TestLaneId>, + ActualFeeRefund, + ConstU64<1>, + StrTestExtension, + >; + type TestMessagesExtension = RefundSignedExtensionAdapter; type TestGrandpaExtensionProvider = RefundBridgedGrandpaMessages< TestRuntime, (), @@ -906,7 +1004,7 @@ mod tests { type TestGrandpaExtension = RefundSignedExtensionAdapter; type TestExtensionProvider = RefundBridgedParachainMessages< TestRuntime, - DefaultRefundableParachainId<(), TestParachain>, + RefundableParachain<(), BridgedUnderlyingParachain>, RefundableMessagesLane<(), TestLaneId>, ActualFeeRefund, ConstU64<1>, @@ -930,7 +1028,7 @@ mod tests { TestPaymentProcedure::rewards_account(MsgDeliveryProofsRewardsAccount::get()) } - fn relayer_account_at_this_chain() -> ThisChainAccountId { + pub fn relayer_account_at_this_chain() -> ThisChainAccountId { 0 } @@ -938,7 +1036,7 @@ mod tests { 0 } - fn initialize_environment( + pub fn initialize_environment( best_relay_header_number: RelayBlockNumber, parachain_head_at_relay_header_number: RelayBlockNumber, best_message: MessageNonce, @@ -949,8 +1047,12 @@ mod tests { StoredAuthoritySet::try_new(authorities, TEST_GRANDPA_SET_ID).unwrap(), ); pallet_bridge_grandpa::BestFinalized::::put(best_relay_header); + pallet_bridge_grandpa::ImportedHeaders::::insert( + best_relay_header.hash(), + bp_test_utils::test_header::(0).build(), + ); - let para_id = ParaId(TestParachain::get()); + let para_id = ParaId(BridgedUnderlyingParachain::PARACHAIN_ID); let para_info = ParaInfo { best_head_hash: BestParaHeadHash { at_relay_block_number: parachain_head_at_relay_header_number, @@ -994,7 +1096,7 @@ mod tests { }) } - fn submit_relay_header_call_ex(relay_header_number: RelayBlockNumber) -> RuntimeCall { + pub fn submit_relay_header_call_ex(relay_header_number: RelayBlockNumber) -> RuntimeCall { let relay_header = BridgedChainHeader::new( relay_header_number, Default::default(), @@ -1008,6 +1110,7 @@ mod tests { finality_target: Box::new(relay_header), justification: relay_justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }) } @@ -1017,10 +1120,24 @@ mod tests { RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), parachains: vec![( - ParaId(TestParachain::get()), + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), + [parachain_head_at_relay_header_number as u8; 32].into(), + )], + parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + }) + } + + pub fn submit_parachain_head_call_ex( + parachain_head_at_relay_header_number: RelayBlockNumber, + ) -> RuntimeCall { + RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads_ex { + at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), + parachains: vec![( + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [parachain_head_at_relay_header_number as u8; 32].into(), )], parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, + is_free_execution_expected: false, }) } @@ -1151,7 +1268,7 @@ mod tests { RuntimeCall::Utility(UtilityCall::batch_all { calls: vec![ submit_relay_header_call_ex(relay_header_number), - 
submit_parachain_head_call(parachain_head_at_relay_header_number), + submit_parachain_head_call_ex(parachain_head_at_relay_header_number), message_delivery_call(best_message), ], }) @@ -1179,7 +1296,7 @@ mod tests { RuntimeCall::Utility(UtilityCall::batch_all { calls: vec![ submit_relay_header_call_ex(relay_header_number), - submit_parachain_head_call(parachain_head_at_relay_header_number), + submit_parachain_head_call_ex(parachain_head_at_relay_header_number), message_confirmation_call(best_message), ], }) @@ -1194,11 +1311,14 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1231,11 +1351,14 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1264,6 +1387,8 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1296,6 +1421,8 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1320,9 +1447,10 @@ mod tests { relayer: relayer_account_at_this_chain(), call_info: CallInfo::ParachainFinalityAndMsgs( SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { base: BaseMessagesProofInfo { @@ -1344,9 +1472,10 @@ mod tests { relayer: relayer_account_at_this_chain(), call_info: CallInfo::ParachainFinalityAndMsgs( SubmitParachainHeadsInfo { - at_relay_block_number: 200, - para_id: ParaId(TestParachain::get()), + at_relay_block: HeaderId(200, [0u8; 32].into()), + para_id: ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), para_head_hash: [200u8; 32].into(), + is_free_execution_expected: false, }, MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( BaseMessagesProofInfo { @@ -1421,8 +1550,14 @@ mod tests { extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) } - fn run_validate_ignore_priority(call: RuntimeCall) -> TransactionValidity { - run_validate(call).map(|mut tx| { + fn run_messages_validate(call: RuntimeCall) -> TransactionValidity { + let extension: TestMessagesExtension = + 
RefundSignedExtensionAdapter(RefundBridgedMessages(PhantomData)); + extension.validate(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + } + + fn ignore_priority(tx: TransactionValidity) -> TransactionValidity { + tx.map(|mut tx| { tx.priority = 0; tx }) @@ -1444,6 +1579,14 @@ mod tests { extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) } + fn run_messages_pre_dispatch( + call: RuntimeCall, + ) -> Result>, TransactionValidityError> { + let extension: TestMessagesExtension = + RefundSignedExtensionAdapter(RefundBridgedMessages(PhantomData)); + extension.pre_dispatch(&relayer_account_at_this_chain(), &call, &DispatchInfo::default(), 0) + } + fn dispatch_info() -> DispatchInfo { DispatchInfo { weight: Weight::from_parts( @@ -1502,40 +1645,48 @@ mod tests { Balances::set_balance(&relayer_account_at_this_chain(), ExistentialDeposit::get()); // message delivery is failing - assert_eq!(run_validate(message_delivery_call(200)), Ok(Default::default()),); - assert_eq!( - run_validate(parachain_finality_and_delivery_batch_call(200, 200)), - Ok(Default::default()), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call(200, 200, 200)), - Ok(Default::default()), - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + assert_eq!(f(message_delivery_call(200)), Ok(Default::default()),); + assert_eq!( + f(parachain_finality_and_delivery_batch_call(200, 200)), + Ok(Default::default()), + ); + assert_eq!( + f(all_finality_and_delivery_batch_call(200, 200, 200)), + Ok(Default::default()), + ); + assert_eq!( + f(all_finality_and_delivery_batch_call_ex(200, 200, 200)), + Ok(Default::default()), + ); + } + + // message confirmation validation is passing assert_eq!( - run_validate(all_finality_and_delivery_batch_call_ex(200, 200, 200)), + ignore_priority(run_validate(message_confirmation_call(200))), Ok(Default::default()), ); - // message confirmation validation is passing assert_eq!( - run_validate_ignore_priority(message_confirmation_call(200)), + ignore_priority(run_messages_validate(message_confirmation_call(200))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_confirmation_batch_call( + ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( 200, 200 - )), + ))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call( 200, 200, 200 - )), + ))), Ok(Default::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call_ex( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( 200, 200, 200 - )), + ))), Ok(Default::default()), ); }); @@ -1549,25 +1700,28 @@ mod tests { BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) .unwrap(); - let priority_of_100_messages_delivery = - run_validate(message_delivery_call(200)).unwrap().priority; - let priority_of_200_messages_delivery = - run_validate(message_delivery_call(300)).unwrap().priority; - assert!( - priority_of_200_messages_delivery > priority_of_100_messages_delivery, - "Invalid priorities: {} for 200 messages vs {} for 100 messages", - priority_of_200_messages_delivery, - priority_of_100_messages_delivery, - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + let priority_of_100_messages_delivery = + 
f(message_delivery_call(200)).unwrap().priority; + let priority_of_200_messages_delivery = + f(message_delivery_call(300)).unwrap().priority; + assert!( + priority_of_200_messages_delivery > priority_of_100_messages_delivery, + "Invalid priorities: {} for 200 messages vs {} for 100 messages", + priority_of_200_messages_delivery, + priority_of_100_messages_delivery, + ); - let priority_of_100_messages_confirmation = - run_validate(message_confirmation_call(200)).unwrap().priority; - let priority_of_200_messages_confirmation = - run_validate(message_confirmation_call(300)).unwrap().priority; - assert_eq!( - priority_of_100_messages_confirmation, - priority_of_200_messages_confirmation - ); + let priority_of_100_messages_confirmation = + f(message_confirmation_call(200)).unwrap().priority; + let priority_of_200_messages_confirmation = + f(message_confirmation_call(300)).unwrap().priority; + assert_eq!( + priority_of_100_messages_confirmation, + priority_of_200_messages_confirmation + ); + } }); } @@ -1579,23 +1733,24 @@ mod tests { BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) .unwrap(); - let priority_of_max_messages_delivery = run_validate(message_delivery_call( - 100 + MaxUnconfirmedMessagesAtInboundLane::get(), - )) - .unwrap() - .priority; - let priority_of_more_than_max_messages_delivery = run_validate(message_delivery_call( - 100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1, - )) - .unwrap() - .priority; - - assert!( - priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, - "Invalid priorities: {} for MAX messages vs {} for MAX+1 messages", - priority_of_max_messages_delivery, - priority_of_more_than_max_messages_delivery, - ); + let fns = [run_validate, run_grandpa_validate, run_messages_validate]; + for f in fns { + let priority_of_max_messages_delivery = + f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get())) + .unwrap() + .priority; + let priority_of_more_than_max_messages_delivery = + f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1)) + .unwrap() + .priority; + + assert!( + priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, + "Invalid priorities: {} for MAX messages vs {} for MAX+1 messages", + priority_of_max_messages_delivery, + priority_of_more_than_max_messages_delivery, + ); + } }); } @@ -1605,45 +1760,54 @@ mod tests { initialize_environment(100, 100, 100); assert_eq!( - run_validate_ignore_priority(message_delivery_call(200)), + ignore_priority(run_validate(message_delivery_call(200))), + Ok(ValidTransaction::default()), + ); + assert_eq!( + ignore_priority(run_validate(message_confirmation_call(200))), + Ok(ValidTransaction::default()), + ); + + assert_eq!( + ignore_priority(run_messages_validate(message_delivery_call(200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(message_confirmation_call(200)), + ignore_priority(run_messages_validate(message_confirmation_call(200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_delivery_batch_call(200, 200)), + ignore_priority(run_validate(parachain_finality_and_delivery_batch_call(200, 200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(parachain_finality_and_confirmation_batch_call( + ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - 
run_validate_ignore_priority(all_finality_and_delivery_batch_call(200, 200, 200)), + ignore_priority(run_validate(all_finality_and_delivery_batch_call(200, 200, 200))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_delivery_batch_call_ex( + ignore_priority(run_validate(all_finality_and_delivery_batch_call_ex( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); assert_eq!( - run_validate_ignore_priority(all_finality_and_confirmation_batch_call_ex( + ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( 200, 200, 200 - )), + ))), Ok(ValidTransaction::default()), ); }); @@ -1933,8 +2097,11 @@ mod tests { RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { at_relay_block: (100, RelayBlockHash::default()), parachains: vec![ - (ParaId(TestParachain::get()), [1u8; 32].into()), - (ParaId(TestParachain::get() + 1), [1u8; 32].into()), + (ParaId(BridgedUnderlyingParachain::PARACHAIN_ID), [1u8; 32].into()), + ( + ParaId(BridgedUnderlyingParachain::PARACHAIN_ID + 1), + [1u8; 32].into(), + ), ], parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, }), @@ -2318,6 +2485,148 @@ mod tests { }); } + #[test] + fn messages_ext_only_parses_standalone_transactions() { + run_test(|| { + initialize_environment(100, 100, 100); + + // relay + parachain + message delivery calls batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_delivery_batch_call(200, 200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_delivery_batch_call_ex(200, 200, 200) + ), + Ok(None), + ); + + // relay + parachain + message confirmation calls batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_confirmation_batch_call(200, 200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &all_finality_and_confirmation_batch_call_ex(200, 200, 200) + ), + Ok(None), + ); + + // parachain + message delivery call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + ¶chain_finality_and_delivery_batch_call(200, 200) + ), + Ok(None), + ); + + // parachain + message confirmation call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + ¶chain_finality_and_confirmation_batch_call(200, 200) + ), + Ok(None), + ); + + // relay + message delivery call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_delivery_batch_call(200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_delivery_batch_call_ex(200, 200) + ), + Ok(None), + ); + + // relay + message confirmation call batch is ignored + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_confirmation_batch_call(200, 200) + ), + Ok(None), + ); + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &relay_finality_and_confirmation_batch_call_ex(200, 200) + ), + Ok(None), + ); + + // message delivery 
call batch is accepted + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &message_delivery_call(200) + ), + Ok(Some(delivery_pre_dispatch_data().call_info)), + ); + + // message confirmation call batch is accepted + assert_eq!( + TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( + &message_confirmation_call(200) + ), + Ok(Some(confirmation_pre_dispatch_data().call_info)), + ); + }); + } + + #[test] + fn messages_ext_rejects_calls_with_obsolete_messages() { + run_test(|| { + initialize_environment(100, 100, 100); + + assert_eq!( + run_messages_pre_dispatch(message_delivery_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + assert_eq!( + run_messages_pre_dispatch(message_confirmation_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + + assert_eq!( + run_messages_validate(message_delivery_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + assert_eq!( + run_messages_validate(message_confirmation_call(100)), + Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), + ); + }); + } + + #[test] + fn messages_ext_accepts_calls_with_new_messages() { + run_test(|| { + initialize_environment(100, 100, 100); + + assert_eq!( + run_messages_pre_dispatch(message_delivery_call(200)), + Ok(Some(delivery_pre_dispatch_data())), + ); + assert_eq!( + run_messages_pre_dispatch(message_confirmation_call(200)), + Ok(Some(confirmation_pre_dispatch_data())), + ); + + assert_eq!(run_messages_validate(message_delivery_call(200)), Ok(Default::default()),); + assert_eq!( + run_messages_validate(message_confirmation_call(200)), + Ok(Default::default()), + ); + }); + } + #[test] fn grandpa_ext_only_parses_valid_batches() { run_test(|| { diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index ad71cd0d456d..e323f1edfc71 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -183,7 +183,8 @@ impl pallet_transaction_payment::Config for TestRuntime { impl pallet_bridge_grandpa::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type BridgedChain = BridgedUnderlyingChain; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<1_024>; type HeadersToKeep = ConstU32<8>; type WeightInfo = pallet_bridge_grandpa::weights::BridgeWeight; } @@ -406,6 +407,7 @@ impl Chain for BridgedUnderlyingParachain { impl Parachain for BridgedUnderlyingParachain { const PARACHAIN_ID: u32 = 42; + const MAX_HEADER_SIZE: u32 = 1_024; } /// The other, bridged chain, used in tests. diff --git a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs index c49aa4b85639..a5c90ceba111 100644 --- a/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-cumulus/src/lib.rs @@ -39,6 +39,9 @@ use frame_support::{ use frame_system::limits; use sp_std::time::Duration; +/// Maximal bridge hub header size. +pub const MAX_BRIDGE_HUB_HEADER_SIZE: u32 = 4_096; + /// Average block interval in Cumulus-based parachains. /// /// Corresponds to the `MILLISECS_PER_BLOCK` from `parachains_common` crate. 
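This constant feeds the new `MAX_HEADER_SIZE` item on the `Parachain` trait, which the bridge hub chains below all set. A minimal sketch of the wiring (the trait is an inlined stand-in for `bp_runtime::Parachain` and the para id is hypothetical):

```rust
/// Maximal bridge hub header size (mirrors `MAX_BRIDGE_HUB_HEADER_SIZE`).
pub const MAX_BRIDGE_HUB_HEADER_SIZE: u32 = 4_096;

/// Stand-in for `bp_runtime::Parachain` after this patch.
pub trait Parachain {
	const PARACHAIN_ID: u32;
	const MAX_HEADER_SIZE: u32;
}

pub struct BridgeHubExample;

impl Parachain for BridgeHubExample {
	const PARACHAIN_ID: u32 = 1_002; // hypothetical id
	const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE;
}

fn main() {
	// the parachain-header size estimation above reads this bound directly
	assert_eq!(BridgeHubExample::MAX_HEADER_SIZE, 4_096);
}
```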
diff --git a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs index 576e3dbee80d..ef3ef4ab7b7a 100644 --- a/bridges/chains/chain-bridge-hub-kusama/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-kusama/src/lib.rs @@ -62,6 +62,7 @@ impl Chain for BridgeHubKusama { impl Parachain for BridgeHubKusama { const PARACHAIN_ID: u32 = BRIDGE_HUB_KUSAMA_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubKusama { diff --git a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs index 6db389c92994..9db71af928e5 100644 --- a/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-polkadot/src/lib.rs @@ -59,6 +59,7 @@ impl Chain for BridgeHubPolkadot { impl Parachain for BridgeHubPolkadot { const PARACHAIN_ID: u32 = BRIDGE_HUB_POLKADOT_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubPolkadot { diff --git a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs index abce872d7ba3..d7097f01c531 100644 --- a/bridges/chains/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-rococo/src/lib.rs @@ -59,6 +59,7 @@ impl Chain for BridgeHubRococo { impl Parachain for BridgeHubRococo { const PARACHAIN_ID: u32 = BRIDGE_HUB_ROCOCO_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubRococo { @@ -103,9 +104,9 @@ frame_support::parameter_types! { /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 5_651_581_649; + pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 314_037_860; /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_901_781; + pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 57_414_813; } diff --git a/bridges/chains/chain-bridge-hub-westend/src/lib.rs b/bridges/chains/chain-bridge-hub-westend/src/lib.rs index 4af895cc6d32..800f290d7bfa 100644 --- a/bridges/chains/chain-bridge-hub-westend/src/lib.rs +++ b/bridges/chains/chain-bridge-hub-westend/src/lib.rs @@ -58,6 +58,7 @@ impl Chain for BridgeHubWestend { impl Parachain for BridgeHubWestend { const PARACHAIN_ID: u32 = BRIDGE_HUB_WESTEND_PARACHAIN_ID; + const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; } impl ChainWithMessages for BridgeHubWestend { @@ -93,10 +94,10 @@ frame_support::parameter_types! { pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 17_756_830_000; /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. 
- /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1_695_489_961_344; + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_delivery_transaction` + `33%`) + pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 94_211_536_452; /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1_618_309_961_344; + /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_standalone_message_confirmation_transaction` + `33%`) + pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 17_224_486_452; } diff --git a/bridges/modules/grandpa/src/call_ext.rs b/bridges/modules/grandpa/src/call_ext.rs index 4a7ebb3cc8d4..6fa62ec0cff4 100644 --- a/bridges/modules/grandpa/src/call_ext.rs +++ b/bridges/modules/grandpa/src/call_ext.rs @@ -15,20 +15,24 @@ // along with Parity Bridges Common. If not, see . use crate::{ - weights::WeightInfo, BridgedBlockNumber, BridgedHeader, Config, CurrentAuthoritySet, Error, - Pallet, + weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config, + CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet, }; use bp_header_chain::{ justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size, ChainWithGrandpa, GrandpaConsensusLogReader, }; -use bp_runtime::{BlockNumberOf, OwnedBridgeModule}; +use bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule}; use codec::Encode; -use frame_support::{dispatch::CallableCallFor, traits::IsSubType, weights::Weight}; +use frame_support::{ + dispatch::CallableCallFor, + traits::{Get, IsSubType}, + weights::Weight, +}; use sp_consensus_grandpa::SetId; use sp_runtime::{ - traits::{Header, Zero}, - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + traits::{CheckedSub, Header, Zero}, + transaction_validity::{InvalidTransaction, TransactionValidityError}, RuntimeDebug, SaturatedConversion, }; @@ -40,6 +44,11 @@ pub struct SubmitFinalityProofInfo { /// An identifier of the validators set that has signed the submitted justification. /// It might be `None` if deprecated version of the `submit_finality_proof` is used. pub current_set_id: Option, + /// If `true`, then the call proves new **mandatory** header. + pub is_mandatory: bool, + /// If `true`, then the call must be free (assuming that everything else is valid) to + /// be treated as valid. + pub is_free_execution_expected: bool, /// Extra weight that we assume is included in the call. /// /// We have some assumptions about headers and justifications of the bridged chain. @@ -54,6 +63,16 @@ pub struct SubmitFinalityProofInfo { pub extra_size: u32, } +/// Verified `SubmitFinalityProofInfo`. +#[derive(Copy, Clone, PartialEq, RuntimeDebug)] +pub struct VerifiedSubmitFinalityProofInfo { + /// Base call information. + pub base: SubmitFinalityProofInfo, + /// A difference between bundled bridged header and best bridged header known to us + /// before the call. + pub improved_by: N, +} + impl SubmitFinalityProofInfo { /// Returns `true` if call size/weight is below our estimations for regular calls. 
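To illustrate how the verified wrapper travels with the parsed call data, here is a trimmed-down model of the two structs above; field sets and types are simplified for the sketch:

```rust
/// Simplified mirror of the parsed call info (field set trimmed for the sketch).
#[derive(Clone, Copy, Debug, PartialEq)]
struct SubmitFinalityProofInfo<N> {
	block_number: N,
	is_mandatory: bool,
	is_free_execution_expected: bool,
}

/// Simplified mirror of the verified wrapper: same data, plus the distance
/// between the bundled header and the previously best finalized one.
#[derive(Clone, Copy, Debug, PartialEq)]
struct VerifiedSubmitFinalityProofInfo<N> {
	base: SubmitFinalityProofInfo<N>,
	improved_by: N,
}

fn main() {
	let base = SubmitFinalityProofInfo {
		block_number: 200u32,
		is_mandatory: false,
		is_free_execution_expected: true,
	};
	// best finalized was 100, so the bundled header improves the state by 100
	let verified = VerifiedSubmitFinalityProofInfo { base, improved_by: 100u32 };
	assert_eq!(verified.base.block_number - verified.improved_by, 100);
}
```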
pub fn fits_limits(&self) -> bool { @@ -67,14 +86,86 @@ pub struct SubmitFinalityProofHelper<T: Config<I>, I: 'static> { } impl<T: Config<I>, I: 'static> SubmitFinalityProofHelper<T, I> { + /// Returns `true` if we may fit more free headers into the current block. If `false` is + /// returned, the call will be paid even if `is_free_execution_expected` has been set + /// to `true`. + pub fn has_free_header_slots() -> bool { + // `unwrap_or(u32::MAX)` means that if `FreeHeadersRemaining` is `None`, we may accept + // this header for free. That is a small cheat - it is `None` if executed outside of + // transaction (e.g. during block initialization). A normal relayer would never submit + // such calls, but if one did, that is not our problem. During normal transactions, + // the `FreeHeadersRemaining` is always `Some(_)`. + let free_headers_remaining = FreeHeadersRemaining::<T, I>::get().unwrap_or(u32::MAX); + free_headers_remaining > 0 + } + + /// Check that: (1) the GRANDPA head provided by the `SubmitFinalityProof` is better than the + /// best one we know; (2) the `current_set_id` matches the current authority set id, if + /// specified; and (3) the transaction MAY be free for the submitter, if + /// `is_free_execution_expected` is `true`. + /// + /// Returns the number of headers between the current best finalized header, known to the + /// pallet, and the bundled header. + pub fn check_obsolete_from_extension( + call_info: &SubmitFinalityProofInfo<BlockNumberOf<T::BridgedChain>>, + ) -> Result<BlockNumberOf<T::BridgedChain>, Error<T, I>> { + // do basic checks first + let improved_by = Self::check_obsolete(call_info.block_number, call_info.current_set_id)?; + + // if the submitter has NOT specified that it wants free execution, then we are done + if !call_info.is_free_execution_expected { + return Ok(improved_by); + } + + // else - if we cannot accept more free headers, "reject" the transaction + if !Self::has_free_header_slots() { + log::trace!( + target: crate::LOG_TARGET, + "Cannot accept free {:?} header {:?}. No more free slots remaining", + T::BridgedChain::ID, + call_info.block_number, + ); + + return Err(Error::<T, I>::FreeHeadersLimitExceded); + } + + // ensure that the `improved_by` is larger than the configured free interval + if !call_info.is_mandatory { + if let Some(free_headers_interval) = T::FreeHeadersInterval::get() { + if improved_by < free_headers_interval.into() { + log::trace!( + target: crate::LOG_TARGET, + "Cannot accept free {:?} header {:?}. Too small difference \ between submitted headers: {:?} vs {}", + T::BridgedChain::ID, + call_info.block_number, + improved_by, + free_headers_interval, + ); + + return Err(Error::<T, I>::BelowFreeHeaderInterval); + } + } + } + + // we do not check whether the header matches free submission criteria here - it is the + // relayer's responsibility to check that + + Ok(improved_by) + } + /// Check that the GRANDPA head provided by the `SubmitFinalityProof` is better than the best /// one we know. Additionally, checks if `current_set_id` matches the current authority set - /// id, if specified. + /// id, if specified. This method is called by the call code and the transaction extension, + /// so it does not check the free execution. + /// + /// Returns the number of headers between the current best finalized header, known to the + /// pallet, and the bundled header.
pub fn check_obsolete( finality_target: BlockNumberOf, current_set_id: Option, - ) -> Result<(), Error> { - let best_finalized = crate::BestFinalized::::get().ok_or_else(|| { + ) -> Result, Error> { + let best_finalized = BestFinalized::::get().ok_or_else(|| { log::trace!( target: crate::LOG_TARGET, "Cannot finalize header {:?} because pallet is not yet initialized", @@ -83,16 +174,19 @@ impl, I: 'static> SubmitFinalityProofHelper { >::NotInitialized })?; - if best_finalized.number() >= finality_target { - log::trace!( - target: crate::LOG_TARGET, - "Cannot finalize obsolete header: bundled {:?}, best {:?}", - finality_target, - best_finalized, - ); + let improved_by = match finality_target.checked_sub(&best_finalized.number()) { + Some(improved_by) if improved_by > Zero::zero() => improved_by, + _ => { + log::trace!( + target: crate::LOG_TARGET, + "Cannot finalize obsolete header: bundled {:?}, best {:?}", + finality_target, + best_finalized, + ); - return Err(Error::::OldHeader) - } + return Err(Error::::OldHeader) + }, + }; if let Some(current_set_id) = current_set_id { let actual_set_id = >::get().set_id; @@ -108,12 +202,12 @@ impl, I: 'static> SubmitFinalityProofHelper { } } - Ok(()) + Ok(improved_by) } /// Check if the `SubmitFinalityProof` was successfully executed. pub fn was_successful(finality_target: BlockNumberOf) -> bool { - match crate::BestFinalized::::get() { + match BestFinalized::::get() { Some(best_finalized) => best_finalized.number() == finality_target, None => false, } @@ -135,17 +229,20 @@ pub trait CallSubType, I: 'static>: finality_target, justification, None, + false, )) } else if let Some(crate::Call::::submit_finality_proof_ex { finality_target, justification, current_set_id, + is_free_execution_expected, }) = self.is_sub_type() { return Some(submit_finality_proof_info_from_args::( finality_target, justification, Some(*current_set_id), + *is_free_execution_expected, )) } @@ -155,26 +252,36 @@ pub trait CallSubType, I: 'static>: /// Validate Grandpa headers in order to avoid "mining" transactions that provide outdated /// bridged chain headers. Without this validation, even honest relayers may lose their funds /// if there are multiple relays running and submitting the same information. - fn check_obsolete_submit_finality_proof(&self) -> TransactionValidity + /// + /// Returns `Ok(None)` if the call is not the `submit_finality_proof` call of our pallet. + /// Returns `Ok(Some(_))` if the call is the `submit_finality_proof` call of our pallet and + /// we believe the call brings header that improves the pallet state. + /// Returns `Err(_)` if the call is the `submit_finality_proof` call of our pallet and we + /// believe that the call will fail. 
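A dependency-free sketch of the `improved_by` computation that `check_obsolete` performs above: the bundled header must be strictly ahead of the best finalized one, and the difference is the number of new headers. Plain `u32` stands in for the bridged chain's block number type:

```rust
// Mirrors the `checked_sub` pattern used by the pallet: `None` marks an obsolete header.
fn improved_by(best_finalized: u32, finality_target: u32) -> Option<u32> {
	match finality_target.checked_sub(best_finalized) {
		Some(d) if d > 0 => Some(d), // strictly better => number of new headers
		_ => None,                   // equal or older => obsolete
	}
}

fn main() {
	assert_eq!(improved_by(10, 15), Some(5));
	assert_eq!(improved_by(10, 10), None);
	assert_eq!(improved_by(10, 8), None);
}
```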
+ fn check_obsolete_submit_finality_proof( + &self, + ) -> Result< + Option>>, + TransactionValidityError, + > where Self: Sized, { - let finality_target = match self.submit_finality_proof_info() { + let call_info = match self.submit_finality_proof_info() { Some(finality_proof) => finality_proof, - _ => return Ok(ValidTransaction::default()), + _ => return Ok(None), }; if Pallet::::ensure_not_halted().is_err() { - return InvalidTransaction::Call.into() + return Err(InvalidTransaction::Call.into()) } - match SubmitFinalityProofHelper::::check_obsolete( - finality_target.block_number, - finality_target.current_set_id, - ) { - Ok(_) => Ok(ValidTransaction::default()), - Err(Error::::OldHeader) => InvalidTransaction::Stale.into(), - Err(_) => InvalidTransaction::Call.into(), + let result = SubmitFinalityProofHelper::::check_obsolete_from_extension(&call_info); + match result { + Ok(improved_by) => + Ok(Some(VerifiedSubmitFinalityProofInfo { base: call_info, improved_by })), + Err(Error::::OldHeader) => Err(InvalidTransaction::Stale.into()), + Err(_) => Err(InvalidTransaction::Call.into()), } } } @@ -189,6 +296,7 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( finality_target: &BridgedHeader, justification: &GrandpaJustification>, current_set_id: Option, + is_free_execution_expected: bool, ) -> SubmitFinalityProofInfo> { let block_number = *finality_target.number(); @@ -230,16 +338,26 @@ pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( ); let extra_size = actual_call_size.saturating_sub(max_expected_call_size); - SubmitFinalityProofInfo { block_number, current_set_id, extra_weight, extra_size } + SubmitFinalityProofInfo { + block_number, + current_set_id, + is_mandatory: is_mandatory_finality_target, + is_free_execution_expected, + extra_weight, + extra_size, + } } #[cfg(test)] mod tests { use crate::{ call_ext::CallSubType, - mock::{run_test, test_header, RuntimeCall, TestBridgedChain, TestNumber, TestRuntime}, - BestFinalized, Config, CurrentAuthoritySet, PalletOperatingMode, StoredAuthoritySet, - SubmitFinalityProofInfo, WeightInfo, + mock::{ + run_test, test_header, FreeHeadersInterval, RuntimeCall, TestBridgedChain, TestNumber, + TestRuntime, + }, + BestFinalized, Config, CurrentAuthoritySet, FreeHeadersRemaining, PalletOperatingMode, + StoredAuthoritySet, SubmitFinalityProofInfo, WeightInfo, }; use bp_header_chain::ChainWithGrandpa; use bp_runtime::{BasicOperatingMode, HeaderId}; @@ -247,6 +365,7 @@ mod tests { make_default_justification, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_SET_ID, }; + use codec::Encode; use frame_support::weights::Weight; use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion}; @@ -256,6 +375,7 @@ mod tests { justification: make_default_justification(&test_header(num)), // not initialized => zero current_set_id: 0, + is_free_execution_expected: false, }; RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( bridge_grandpa_call, @@ -311,6 +431,121 @@ mod tests { }); } + #[test] + fn extension_rejects_new_header_if_free_execution_is_requested_and_free_submissions_are_not_accepted( + ) { + run_test(|| { + let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { + finality_target: Box::new(test_header(10 + FreeHeadersInterval::get() as u64)), + justification: make_default_justification(&test_header( + 10 + FreeHeadersInterval::get() as u64, + )), + current_set_id: 0, + is_free_execution_expected: true, + }; + sync_to_header_10(); + + // when we can 
accept free headers => Ok + FreeHeadersRemaining::::put(2); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_ok()); + + // when we can NOT accept free headers => Err + FreeHeadersRemaining::::put(0); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_err()); + + // when called outside of transaction => Ok + FreeHeadersRemaining::::kill(); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call, + ),) + .is_ok()); + }) + } + + #[test] + fn extension_rejects_new_header_if_free_execution_is_requested_and_improved_by_is_below_expected( + ) { + run_test(|| { + let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { + finality_target: Box::new(test_header(100)), + justification: make_default_justification(&test_header(100)), + current_set_id: 0, + is_free_execution_expected: true, + }; + sync_to_header_10(); + + // when `improved_by` is less than the free interval + BestFinalized::::put(HeaderId( + 100 - FreeHeadersInterval::get() as u64 + 1, + sp_core::H256::default(), + )); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_err()); + + // when `improved_by` is equal to the free interval + BestFinalized::::put(HeaderId( + 100 - FreeHeadersInterval::get() as u64, + sp_core::H256::default(), + )); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_ok()); + + // when `improved_by` is larger than the free interval + BestFinalized::::put(HeaderId( + 100 - FreeHeadersInterval::get() as u64 - 1, + sp_core::H256::default(), + )); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_ok()); + + // when `improved_by` is less than the free interval BUT it is a mandatory header + let mut mandatory_header = test_header(100); + let consensus_log = sp_consensus_grandpa::ConsensusLog::::ScheduledChange( + sp_consensus_grandpa::ScheduledChange { + next_authorities: bp_test_utils::authority_list(), + delay: 0, + }, + ); + mandatory_header.digest = sp_runtime::Digest { + logs: vec![DigestItem::Consensus( + sp_consensus_grandpa::GRANDPA_ENGINE_ID, + consensus_log.encode(), + )], + }; + let justification = make_justification_for_header(JustificationGeneratorParams { + header: mandatory_header.clone(), + set_id: 1, + ..Default::default() + }); + let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { + finality_target: Box::new(mandatory_header), + justification, + current_set_id: 0, + is_free_execution_expected: true, + }; + BestFinalized::::put(HeaderId( + 100 - FreeHeadersInterval::get() as u64 + 1, + sp_core::H256::default(), + )); + assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( + bridge_grandpa_call.clone(), + ),) + .is_ok()); + }) + } + #[test] fn extension_accepts_new_header() { run_test(|| { @@ -336,6 +571,8 @@ mod tests { current_set_id: None, extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }) ); @@ -345,6 +582,7 @@ mod tests { finality_target: Box::new(test_header(42)), justification: make_default_justification(&test_header(42)), current_set_id: 777, + is_free_execution_expected: false, }); assert_eq!( deprecated_call.submit_finality_proof_info(), @@ -353,6 +591,8 @@ mod tests 
{ current_set_id: Some(777), extra_weight: Weight::zero(), extra_size: 0, + is_mandatory: false, + is_free_execution_expected: false, }) ); } @@ -370,6 +610,7 @@ mod tests { finality_target: Box::new(small_finality_target), justification: small_justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }); assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0); @@ -387,6 +628,7 @@ mod tests { finality_target: Box::new(large_finality_target), justification: large_justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }); assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0); } @@ -406,6 +648,7 @@ mod tests { finality_target: Box::new(finality_target.clone()), justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }); assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); @@ -420,7 +663,52 @@ mod tests { finality_target: Box::new(finality_target), justification, current_set_id: TEST_GRANDPA_SET_ID, + is_free_execution_expected: false, }); assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight); } + + #[test] + fn check_obsolete_submit_finality_proof_returns_correct_improved_by() { + run_test(|| { + fn make_call(number: u64) -> RuntimeCall { + RuntimeCall::Grandpa(crate::Call::::submit_finality_proof_ex { + finality_target: Box::new(test_header(number)), + justification: make_default_justification(&test_header(number)), + current_set_id: 0, + is_free_execution_expected: false, + }) + } + + sync_to_header_10(); + + // when the difference between headers is 1 + assert_eq!( + RuntimeCall::check_obsolete_submit_finality_proof(&make_call(11)) + .unwrap() + .unwrap() + .improved_by, + 1, + ); + + // when the difference between headers is 2 + assert_eq!( + RuntimeCall::check_obsolete_submit_finality_proof(&make_call(12)) + .unwrap() + .unwrap() + .improved_by, + 2, + ); + }) + } + + #[test] + fn check_obsolete_submit_finality_proof_ignores_other_calls() { + run_test(|| { + let call = + RuntimeCall::System(frame_system::Call::::remark { remark: vec![42] }); + + assert_eq!(RuntimeCall::check_obsolete_submit_finality_proof(&call), Ok(None)); + }) + } } diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index 9e095651ef81..cb536eb07ff6 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -44,6 +44,7 @@ use bp_header_chain::{ }; use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule}; use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound}; +use sp_consensus_grandpa::SetId; use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, @@ -57,6 +58,7 @@ mod storage_types; /// Module, containing weights for this pallet. pub mod weights; +pub mod weights_ext; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; @@ -65,6 +67,7 @@ pub mod benchmarking; pub use call_ext::*; pub use pallet::*; pub use weights::WeightInfo; +pub use weights_ext::WeightInfoExt; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "runtime::bridge-grandpa"; @@ -101,17 +104,31 @@ pub mod pallet { /// The chain we are bridging to here. type BridgedChain: ChainWithGrandpa; - /// Maximal number of "free" mandatory header transactions per block. + /// Maximal number of "free" header transactions per block. 
/// /// To be able to track the bridged chain, the pallet requires all headers that are /// changing GRANDPA authorities set at the bridged chain (we call them mandatory). - /// So it is a common good deed to submit mandatory headers to the pallet. However, if the - /// bridged chain gets compromised, its validators may generate as many mandatory headers - /// as they want. And they may fill the whole block (at this chain) for free. This constants - /// limits number of calls that we may refund in a single block. All calls above this - /// limit are accepted, but are not refunded. + /// So it is a common good deed to submit mandatory headers to the pallet. + /// + /// The pallet may be configured (see [`Self::FreeHeadersInterval`]) to import some + /// non-mandatory headers for free as well. It may also be treated as a common good + /// deed, because it may help to reduce bridge fees - this cost may be deducted from + /// bridge fees paid by message senders. + /// + /// However, if the bridged chain gets compromised, its validators may generate as many + /// "free" headers as they want. And they may fill the whole block (at this chain) for + /// free. This constant limits the number of calls that we may refund in a single block. + /// All calls above this limit are accepted, but are not refunded. + #[pallet::constant] + type MaxFreeHeadersPerBlock: Get<u32>; + + /// The distance between bridged chain headers that may be submitted for free. The + /// first free header is header number zero, the next one is header number + /// `FreeHeadersInterval::get()` or any of its descendants if that header has not + /// been submitted. In other words, the interval between free headers should be at least + /// `FreeHeadersInterval`. #[pallet::constant] - type MaxFreeMandatoryHeadersPerBlock: Get<u32>; + type FreeHeadersInterval: Get<Option<u32>>; /// Maximal number of finalized headers to keep in the storage. /// @@ -124,7 +141,7 @@ pub mod pallet { type HeadersToKeep: Get<u32>; /// Weights gathered through benchmarking. - type WeightInfo: WeightInfo; + type WeightInfo: WeightInfoExt; } #[pallet::pallet] @@ -133,12 +150,12 @@ pub mod pallet { #[pallet::hooks] impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> { fn on_initialize(_n: BlockNumberFor<T>) -> Weight { - FreeMandatoryHeadersRemaining::<T, I>::put(T::MaxFreeMandatoryHeadersPerBlock::get()); + FreeHeadersRemaining::<T, I>::put(T::MaxFreeHeadersPerBlock::get()); Weight::zero() } fn on_finalize(_n: BlockNumberFor<T>) { - FreeMandatoryHeadersRemaining::<T, I>::kill(); + FreeHeadersRemaining::<T, I>::kill(); } } @@ -155,7 +172,7 @@ pub mod pallet { /// `submit_finality_proof_ex` instead. Semantically, this call is an equivalent of the /// `submit_finality_proof_ex` call without current authority set id check. #[pallet::call_index(0)] - #[pallet::weight(::submit_finality_proof( + #[pallet::weight(T::WeightInfo::submit_finality_proof_weight( justification.commit.precommits.len().saturated_into(), justification.votes_ancestries.len().saturated_into(), ))] @@ -175,6 +192,8 @@ pub mod pallet { // the `submit_finality_proof_ex` also reads this value, but it is done from the // cache, so we don't treat it as an additional db access <CurrentAuthoritySet<T, I>>::get().set_id, + // cannot enforce free execution using this call + false, ) } @@ -250,8 +269,14 @@ pub mod pallet { /// - verification is not optimized or invalid; /// /// - header contains forced authorities set change or change with non-zero delay. + /// + /// The `is_free_execution_expected` parameter is not really used inside the call.
It is + /// used by the transaction extension, which should be registered at the runtime level. If + /// this parameter is `true`, the transaction will be treated as invalid, if the call won't + /// be executed for free. If transaction extension is not used by the runtime, this + /// parameter is not used at all. #[pallet::call_index(4)] - #[pallet::weight(::submit_finality_proof( + #[pallet::weight(T::WeightInfo::submit_finality_proof_weight( justification.commit.precommits.len().saturated_into(), justification.votes_ancestries.len().saturated_into(), ))] @@ -260,6 +285,7 @@ pub mod pallet { finality_target: Box>, justification: GrandpaJustification>, current_set_id: sp_consensus_grandpa::SetId, + _is_free_execution_expected: bool, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; ensure_signed(origin)?; @@ -273,7 +299,8 @@ pub mod pallet { // it checks whether the `number` is better than the current best block number // and whether the `current_set_id` matches the best known set id - SubmitFinalityProofHelper::::check_obsolete(number, Some(current_set_id))?; + let improved_by = + SubmitFinalityProofHelper::::check_obsolete(number, Some(current_set_id))?; let authority_set = >::get(); let unused_proof_size = authority_set.unused_proof_size(); @@ -283,23 +310,16 @@ pub mod pallet { let maybe_new_authority_set = try_enact_authority_change::(&finality_target, set_id)?; - let may_refund_call_fee = maybe_new_authority_set.is_some() && - // if we have seen too many mandatory headers in this block, we don't want to refund - Self::free_mandatory_headers_remaining() > 0 && - // if arguments out of expected bounds, we don't want to refund - submit_finality_proof_info_from_args::(&finality_target, &justification, Some(current_set_id)) - .fits_limits(); + let may_refund_call_fee = may_refund_call_fee::( + &finality_target, + &justification, + current_set_id, + improved_by, + ); if may_refund_call_fee { - FreeMandatoryHeadersRemaining::::mutate(|count| { - *count = count.saturating_sub(1) - }); + on_free_header_imported::(); } insert_header::(*finality_target, hash); - log::info!( - target: LOG_TARGET, - "Successfully imported finalized header with hash {:?}!", - hash - ); // mandatory header is a header that changes authorities set. The pallet can't go // further without importing this header. So every bridge MUST import mandatory headers. @@ -311,6 +331,13 @@ pub mod pallet { // to pay for the transaction. let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes }; + log::info!( + target: LOG_TARGET, + "Successfully imported finalized header with hash {:?}! Free: {}", + hash, + if may_refund_call_fee { "Yes" } else { "No" }, + ); + // the proof size component of the call weight assumes that there are // `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen` // estimation). But if their number is lower, then we may "refund" some `proof_size`, @@ -335,20 +362,18 @@ pub mod pallet { } } - /// Number mandatory headers that we may accept in the current block for free (returning - /// `Pays::No`). + /// Number of free header submissions that we may yet accept in the current block. /// - /// If the `FreeMandatoryHeadersRemaining` hits zero, all following mandatory headers in the + /// If the `FreeHeadersRemaining` hits zero, all following mandatory headers in the /// current block are accepted with fee (`Pays::Yes` is returned). 
/// - /// The `FreeMandatoryHeadersRemaining` is an ephemeral value that is set to - /// `MaxFreeMandatoryHeadersPerBlock` at each block initialization and is killed on block + /// The `FreeHeadersRemaining` is an ephemeral value that is set to + /// `MaxFreeHeadersPerBlock` at each block initialization and is killed on block /// finalization. So it never ends up in the storage trie. #[pallet::storage] #[pallet::whitelist_storage] - #[pallet::getter(fn free_mandatory_headers_remaining)] - pub(super) type FreeMandatoryHeadersRemaining, I: 'static = ()> = - StorageValue<_, u32, ValueQuery>; + pub type FreeHeadersRemaining, I: 'static = ()> = + StorageValue<_, u32, OptionQuery>; /// Hash of the header used to bootstrap the pallet. #[pallet::storage] @@ -473,6 +498,68 @@ pub mod pallet { /// The `current_set_id` argument of the `submit_finality_proof_ex` doesn't match /// the id of the current set, known to the pallet. InvalidAuthoritySetId, + /// The submitter wanted free execution, but we can't fit more free transactions + /// to the block. + FreeHeadersLimitExceded, + /// The submitter wanted free execution, but the difference between best known and + /// bundled header numbers is below the `FreeHeadersInterval`. + BelowFreeHeaderInterval, + } + + /// Called when new free header is imported. + pub fn on_free_header_imported, I: 'static>() { + FreeHeadersRemaining::::mutate(|count| { + *count = match *count { + None => None, + // the signed extension expects that `None` means outside of block + // execution - i.e. when transaction is validated from the transaction pool, + // so use `saturating_sub` and don't go from `Some(0)`->`None` + Some(count) => Some(count.saturating_sub(1)), + } + }); + } + + /// Return true if we may refund transaction cost to the submitter. In other words, + /// this transaction is considered as common good deed w.r.t to pallet configuration. + fn may_refund_call_fee, I: 'static>( + finality_target: &BridgedHeader, + justification: &GrandpaJustification>, + current_set_id: SetId, + improved_by: BridgedBlockNumber, + ) -> bool { + // if we have refunded too much at this block => not refunding + if FreeHeadersRemaining::::get().unwrap_or(0) == 0 { + return false; + } + + // if size/weight of call is larger than expected => not refunding + let call_info = submit_finality_proof_info_from_args::( + &finality_target, + &justification, + Some(current_set_id), + // this function is called from the transaction body and we do not want + // to do MAY-be-free-executed checks here - they had to be done in the + // transaction extension before + false, + ); + if !call_info.fits_limits() { + return false; + } + + // if that's a mandatory header => refund + if call_info.is_mandatory { + return true; + } + + // if configuration allows free non-mandatory headers and the header + // matches criteria => refund + if let Some(free_headers_interval) = T::FreeHeadersInterval::get() { + if improved_by >= free_headers_interval.into() { + return true; + } + } + + false } /// Check the given header for a GRANDPA scheduled authority set change. 
If a change @@ -692,8 +779,8 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I> mod tests { use super::*; use crate::mock::{ - run_test, test_header, RuntimeEvent as TestEvent, RuntimeOrigin, System, TestBridgedChain, - TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES, + run_test, test_header, FreeHeadersInterval, RuntimeEvent as TestEvent, RuntimeOrigin, + System, TestBridgedChain, TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES, }; use bp_header_chain::BridgeGrandpaCall; use bp_runtime::BasicOperatingMode; @@ -747,6 +834,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ) } @@ -766,6 +854,7 @@ mod tests { Box::new(header), justification, set_id, + false, ) } @@ -794,6 +883,7 @@ mod tests { Box::new(header), justification, set_id, + false, ) } @@ -1009,6 +1099,7 @@ mod tests { Box::new(header.clone()), justification.clone(), TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::InvalidJustification ); @@ -1018,6 +1109,7 @@ mod tests { Box::new(header), justification, next_set_id, + false, ), <Error<TestRuntime>>::InvalidAuthoritySetId ); @@ -1039,6 +1131,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::InvalidJustification ); @@ -1069,6 +1162,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::InvalidAuthoritySet ); @@ -1108,6 +1202,7 @@ mod tests { Box::new(header.clone()), justification.clone(), TEST_GRANDPA_SET_ID, + false, ); assert_ok!(result); assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::No); @@ -1171,6 +1266,7 @@ mod tests { Box::new(header.clone()), justification, TEST_GRANDPA_SET_ID, + false, ); assert_ok!(result); assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); @@ -1203,6 +1299,7 @@ mod tests { Box::new(header.clone()), justification, TEST_GRANDPA_SET_ID, + false, ); assert_ok!(result); assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); @@ -1233,6 +1330,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::UnsupportedScheduledChange ); @@ -1259,6 +1357,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::UnsupportedScheduledChange ); @@ -1285,6 +1384,7 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), <Error<TestRuntime>>::TooManyAuthoritiesInSet ); @@ -1350,12 +1450,13 @@ mod tests { Box::new(header), invalid_justification, TEST_GRANDPA_SET_ID, + false, ) }; initialize_substrate_bridge(); - for _ in 0..<TestRuntime as Config>::MaxFreeMandatoryHeadersPerBlock::get() + 1 { + for _ in 0..<TestRuntime as Config>::MaxFreeHeadersPerBlock::get() + 1 { assert_err!(submit_invalid_request(), <Error<TestRuntime>>::InvalidJustification); } @@ -1423,6 +1524,64 @@ mod tests { }) } + #[test] + fn may_import_non_mandatory_header_for_free() { + run_test(|| { + initialize_substrate_bridge(); + + // set best finalized to `BEST` + const BEST: u8 = 12; + fn reset_best() { + BestFinalized::<TestRuntime, ()>::set(Some(HeaderId( + BEST as _, + Default::default(), + ))); + } + + // non-mandatory header is imported with fee + reset_best(); + let non_free_header_number = BEST + FreeHeadersInterval::get() as u8 - 1; + let result = submit_finality_proof(non_free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); + + // non-mandatory free header is imported without fee + reset_best(); + let free_header_number = BEST + FreeHeadersInterval::get() as u8; + let result = submit_finality_proof(free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::No); + + // another non-mandatory free header is imported without
fee + let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 2; + let result = submit_finality_proof(free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::No); + + // now the rate limiter starts charging fees even for free headers + let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 3; + let result = submit_finality_proof(free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::Yes); + + // check that we can import for free if `improved_by` is larger + // than the free interval + next_block(); + reset_best(); + let free_header_number = FreeHeadersInterval::get() as u8 + 42; + let result = submit_finality_proof(free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::No); + + // check that the rate limiter shares the counter between mandatory + // and free non-mandatory headers + next_block(); + reset_best(); + let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 4; + let result = submit_finality_proof(free_header_number); + assert_eq!(result.unwrap().pays_fee, Pays::No); + let result = submit_mandatory_finality_proof(free_header_number + 1, 1); + assert_eq!(result.expect("call failed").pays_fee, Pays::No); + let result = submit_mandatory_finality_proof(free_header_number + 2, 2); + assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); + }); + } + #[test] fn should_prune_headers_over_headers_to_keep_parameter() { run_test(|| { @@ -1519,9 +1678,23 @@ mod tests { Box::new(header), justification, TEST_GRANDPA_SET_ID, + false, ), DispatchError::BadOrigin, ); }) } + + #[test] + fn on_free_header_imported_never_sets_to_none() { + run_test(|| { + FreeHeadersRemaining::::set(Some(2)); + on_free_header_imported::(); + assert_eq!(FreeHeadersRemaining::::get(), Some(1)); + on_free_header_imported::(); + assert_eq!(FreeHeadersRemaining::::get(), Some(0)); + on_free_header_imported::(); + assert_eq!(FreeHeadersRemaining::::get(), Some(0)); + }) + } } diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index e689e520c92f..27df9d9c78f5 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -48,14 +48,16 @@ impl frame_system::Config for TestRuntime { } parameter_types! { - pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2; + pub const MaxFreeHeadersPerBlock: u32 = 2; + pub const FreeHeadersInterval: u32 = 32; pub const HeadersToKeep: u32 = 5; } impl grandpa::Config for TestRuntime { type RuntimeEvent = RuntimeEvent; type BridgedChain = TestBridgedChain; - type MaxFreeMandatoryHeadersPerBlock = MaxFreeMandatoryHeadersPerBlock; + type MaxFreeHeadersPerBlock = MaxFreeHeadersPerBlock; + type FreeHeadersInterval = FreeHeadersInterval; type HeadersToKeep = HeadersToKeep; type WeightInfo = (); } diff --git a/bridges/modules/grandpa/src/weights_ext.rs b/bridges/modules/grandpa/src/weights_ext.rs new file mode 100644 index 000000000000..66edea6fb6a6 --- /dev/null +++ b/bridges/modules/grandpa/src/weights_ext.rs @@ -0,0 +1,58 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ + // Parity Bridges Common is distributed in the hope that it will be useful, + // but WITHOUT ANY WARRANTY; without even the implied warranty of + // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + // GNU General Public License for more details. + + // You should have received a copy of the GNU General Public License + // along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. + + //! Weight-related utilities. + + use crate::weights::{BridgeWeight, WeightInfo}; + + use frame_support::weights::Weight; + + /// Extended weight info. + pub trait WeightInfoExt: WeightInfo { + // Our configuration assumes that the runtime has special signed extensions used to: + // + // 1) boost priority of `submit_finality_proof` transactions; + // + // 2) slash the relayer if they submit an invalid transaction. + // + // We read and update storage values of other pallets (`pallet-bridge-relayers` and + // balances/assets pallet). So we need to add this weight to the weight of our call. + // Hence the two following methods. + + /// Extra weight that is added to the `submit_finality_proof` call weight by signed extensions + /// that are declared at the runtime level. + fn submit_finality_proof_overhead_from_runtime() -> Weight; + + // Functions that are directly mapped to extrinsics weights. + + /// Weight of the `submit_finality_proof` extrinsic. + fn submit_finality_proof_weight(precommits_len: u32, votes_ancestries_len: u32) -> Weight { + let base_weight = Self::submit_finality_proof(precommits_len, votes_ancestries_len); + base_weight.saturating_add(Self::submit_finality_proof_overhead_from_runtime()) + } + } + + impl<T: frame_system::Config> WeightInfoExt for BridgeWeight<T> { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + Weight::zero() + } + } + + impl WeightInfoExt for () { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + Weight::zero() + } + } diff --git a/bridges/modules/parachains/src/call_ext.rs b/bridges/modules/parachains/src/call_ext.rs index da91a40a2322..fe6b319205d4 100644 --- a/bridges/modules/parachains/src/call_ext.rs +++ b/bridges/modules/parachains/src/call_ext.rs @@ -14,25 +14,45 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. -use crate::{Config, Pallet, RelayBlockNumber}; +use crate::{Config, GrandpaPalletOf, Pallet, RelayBlockHash, RelayBlockNumber}; +use bp_header_chain::HeaderChain; use bp_parachains::BestParaHeadHash; use bp_polkadot_core::parachains::{ParaHash, ParaId}; -use bp_runtime::OwnedBridgeModule; -use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; +use bp_runtime::{HeaderId, OwnedBridgeModule}; +use frame_support::{ + dispatch::CallableCallFor, + traits::{Get, IsSubType}, +}; +use pallet_bridge_grandpa::SubmitFinalityProofHelper; use sp_runtime::{ - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + traits::Zero, + transaction_validity::{InvalidTransaction, TransactionValidityError}, RuntimeDebug, }; /// Info about a `SubmitParachainHeads` call which tries to update a single parachain. #[derive(PartialEq, RuntimeDebug)] pub struct SubmitParachainHeadsInfo { - /// Number of the finalized relay block that has been used to prove parachain finality. - pub at_relay_block_number: RelayBlockNumber, + /// Number and hash of the finalized relay block that has been used to prove parachain + /// finality. + pub at_relay_block: HeaderId<RelayBlockHash, RelayBlockNumber>, /// Parachain identifier. pub para_id: ParaId, /// Hash of the bundled parachain head.
pub para_head_hash: ParaHash, + /// If `true`, then the call must be free (assuming that everything else is valid) to + /// be treated as valid. + pub is_free_execution_expected: bool, + } + + /// Verified `SubmitParachainHeadsInfo`. + #[derive(PartialEq, RuntimeDebug)] + pub struct VerifiedSubmitParachainHeadsInfo { + /// Base call information. + pub base: SubmitParachainHeadsInfo, + /// A difference between the bundled bridged relay chain header and the relay chain header + /// number used to prove the best bridged parachain header, known to us before the call. + pub improved_by: RelayBlockNumber, + } /// Helper struct that provides methods for working with the `SubmitParachainHeads` call. @@ -41,40 +61,117 @@ pub struct SubmitParachainHeadsHelper<T: Config<I>, I: 'static> { } impl<T: Config<I>, I: 'static> SubmitParachainHeadsHelper<T, I> { - /// Check if the para head provided by the `SubmitParachainHeads` is better than the best one - /// we know. - pub fn is_obsolete(update: &SubmitParachainHeadsInfo) -> bool { - let stored_best_head = match crate::ParasInfo::<T, I>::get(update.para_id) { - Some(stored_best_head) => stored_best_head, - None => return false, + /// Check that is performed by the signed extension; it takes the + /// `is_free_execution_expected` flag into account. + pub fn check_obsolete_from_extension( + update: &SubmitParachainHeadsInfo, + ) -> Result<RelayBlockNumber, TransactionValidityError> { + // first do all base checks + let improved_by = Self::check_obsolete(update)?; + + // if we don't expect free execution - no more checks + if !update.is_free_execution_expected { + return Ok(improved_by); + } + + // reject if no more free slots remaining in the block + if !SubmitFinalityProofHelper::<T, T::BridgesGrandpaPalletInstance>::has_free_header_slots() + { + log::trace!( + target: crate::LOG_TARGET, + "The free parachain {:?} head can't be updated: no more free slots \ left in the block.", + update.para_id, + ); + + return Err(InvalidTransaction::Call.into()); + } + + // if the free headers interval is not configured and the call is expected to execute + // for free => it is a relayer error, the relayer should've been able to detect that. + let free_headers_interval = match T::FreeHeadersInterval::get() { + Some(free_headers_interval) => free_headers_interval, + None => return Ok(improved_by), }; - if stored_best_head.best_head_hash.at_relay_block_number >= update.at_relay_block_number { + // reject if we are importing parachain headers too often + if improved_by < free_headers_interval { log::trace!( target: crate::LOG_TARGET, - "The parachain head can't be updated. The parachain head for {:?} \ - was already updated at better relay chain block {} >= {}.", + "The free parachain {:?} head can't be updated: it improves previous \ best head by {} while at least {} is expected.", update.para_id, - stored_best_head.best_head_hash.at_relay_block_number, - update.at_relay_block_number + improved_by, + free_headers_interval, ); - return true + + return Err(InvalidTransaction::Stale.into()); } - if stored_best_head.best_head_hash.head_hash == update.para_head_hash { + Ok(improved_by) + } + + /// Check if the para head provided by the `SubmitParachainHeads` is better than the best one + /// we know.
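A simplified model of the parachain-side improvement rule used by the helpers above and below: a tracked head must be proven at a strictly newer relay block, while the very first head of a parachain counts as a maximal improvement, so it always passes the free-interval comparison. Plain `u32` stands in for `RelayBlockNumber`:

```rust
// `stored_at_relay_block` is `None` when the pallet does not track the parachain yet.
fn parachain_improved_by(stored_at_relay_block: Option<u32>, new_at_relay_block: u32) -> Option<u32> {
	match stored_at_relay_block {
		// a strictly newer relay block is required, otherwise the update is obsolete
		Some(stored) => match new_at_relay_block.checked_sub(stored) {
			Some(d) if d > 0 => Some(d),
			_ => None,
		},
		// no head tracked yet => first submission, maximal improvement
		None => Some(u32::MAX),
	}
}

fn main() {
	assert_eq!(parachain_improved_by(None, 10), Some(u32::MAX)); // first head
	assert_eq!(parachain_improved_by(Some(10), 42), Some(32));   // 32 relay blocks newer
	assert_eq!(parachain_improved_by(Some(10), 10), None);       // obsolete
}
```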
+ pub fn check_obsolete( + update: &SubmitParachainHeadsInfo, + ) -> Result { + // check if we know better parachain head already + let improved_by = match crate::ParasInfo::::get(update.para_id) { + Some(stored_best_head) => { + let improved_by = match update + .at_relay_block + .0 + .checked_sub(stored_best_head.best_head_hash.at_relay_block_number) + { + Some(improved_by) if improved_by > Zero::zero() => improved_by, + _ => { + log::trace!( + target: crate::LOG_TARGET, + "The parachain head can't be updated. The parachain head for {:?} \ + was already updated at better relay chain block {} >= {}.", + update.para_id, + stored_best_head.best_head_hash.at_relay_block_number, + update.at_relay_block.0 + ); + return Err(InvalidTransaction::Stale.into()) + }, + }; + + if stored_best_head.best_head_hash.head_hash == update.para_head_hash { + log::trace!( + target: crate::LOG_TARGET, + "The parachain head can't be updated. The parachain head hash for {:?} \ + was already updated to {} at block {} < {}.", + update.para_id, + update.para_head_hash, + stored_best_head.best_head_hash.at_relay_block_number, + update.at_relay_block.0 + ); + return Err(InvalidTransaction::Stale.into()) + } + + improved_by + }, + None => RelayBlockNumber::MAX, + }; + + // let's check if our chain had no reorgs and we still know the relay chain header + // used to craft the proof + if GrandpaPalletOf::::finalized_header_state_root(update.at_relay_block.1).is_none() { log::trace!( target: crate::LOG_TARGET, - "The parachain head can't be updated. The parachain head hash for {:?} \ - was already updated to {} at block {} < {}.", + "The parachain {:?} head can't be updated. Relay chain header {}/{} used to create \ + parachain proof is missing from the storage.", update.para_id, - update.para_head_hash, - stored_best_head.best_head_hash.at_relay_block_number, - update.at_relay_block_number + update.at_relay_block.0, + update.at_relay_block.1, ); - return true + + return Err(InvalidTransaction::Call.into()) } - false + Ok(improved_by) } /// Check if the `SubmitParachainHeads` was successfully executed. @@ -83,7 +180,7 @@ impl, I: 'static> SubmitParachainHeadsHelper { Some(stored_best_head) => stored_best_head.best_head_hash == BestParaHeadHash { - at_relay_block_number: update.at_relay_block_number, + at_relay_block_number: update.at_relay_block.0, head_hash: update.para_head_hash, }, None => false, @@ -98,22 +195,36 @@ pub trait CallSubType, I: 'static>: /// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with /// one single parachain entry. fn one_entry_submit_parachain_heads_info(&self) -> Option { - if let Some(crate::Call::::submit_parachain_heads { - ref at_relay_block, - ref parachains, - .. - }) = self.is_sub_type() - { - if let &[(para_id, para_head_hash)] = parachains.as_slice() { - return Some(SubmitParachainHeadsInfo { - at_relay_block_number: at_relay_block.0, + match self.is_sub_type() { + Some(crate::Call::::submit_parachain_heads { + ref at_relay_block, + ref parachains, + .. + }) => match ¶chains[..] { + &[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(at_relay_block.0, at_relay_block.1), para_id, para_head_hash, - }) - } + is_free_execution_expected: false, + }), + _ => None, + }, + Some(crate::Call::::submit_parachain_heads_ex { + ref at_relay_block, + ref parachains, + is_free_execution_expected, + .. + }) => match ¶chains[..] 
{ + &[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo { + at_relay_block: HeaderId(at_relay_block.0, at_relay_block.1), + para_id, + para_head_hash, + is_free_execution_expected: *is_free_execution_expected, + }), + _ => None, + }, + _ => None, } - - None } /// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with @@ -133,24 +244,23 @@ pub trait CallSubType, I: 'static>: /// block production, or "eat" significant portion of block production time literally /// for nothing. In addition, the single-parachain-head-per-transaction is how the /// pallet will be used in our environment. - fn check_obsolete_submit_parachain_heads(&self) -> TransactionValidity + fn check_obsolete_submit_parachain_heads( + &self, + ) -> Result, TransactionValidityError> where Self: Sized, { let update = match self.one_entry_submit_parachain_heads_info() { Some(update) => update, - None => return Ok(ValidTransaction::default()), + None => return Ok(None), }; if Pallet::::ensure_not_halted().is_err() { - return InvalidTransaction::Call.into() + return Err(InvalidTransaction::Call.into()) } - if SubmitParachainHeadsHelper::::is_obsolete(&update) { - return InvalidTransaction::Stale.into() - } - - Ok(ValidTransaction::default()) + SubmitParachainHeadsHelper::::check_obsolete_from_extension(&update) + .map(|improved_by| Some(VerifiedSubmitParachainHeadsInfo { base: update, improved_by })) } } @@ -164,9 +274,10 @@ where #[cfg(test)] mod tests { use crate::{ - mock::{run_test, RuntimeCall, TestRuntime}, - CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockNumber, + mock::{run_test, FreeHeadersInterval, RuntimeCall, TestRuntime}, + CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockHash, RelayBlockNumber, }; + use bp_header_chain::StoredHeaderData; use bp_parachains::BestParaHeadHash; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use bp_runtime::BasicOperatingMode; @@ -175,15 +286,37 @@ mod tests { num: RelayBlockNumber, parachains: Vec<(ParaId, ParaHash)>, ) -> bool { - RuntimeCall::Parachains(crate::Call::::submit_parachain_heads { - at_relay_block: (num, Default::default()), + RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { + at_relay_block: (num, [num as u8; 32].into()), + parachains, + parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + is_free_execution_expected: false, + }) + .check_obsolete_submit_parachain_heads() + .is_ok() + } + + fn validate_free_submit_parachain_heads( + num: RelayBlockNumber, + parachains: Vec<(ParaId, ParaHash)>, + ) -> bool { + RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { + at_relay_block: (num, [num as u8; 32].into()), parachains, parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, + is_free_execution_expected: true, }) .check_obsolete_submit_parachain_heads() .is_ok() } + fn insert_relay_block(num: RelayBlockNumber) { + pallet_bridge_grandpa::ImportedHeaders::::insert( + RelayBlockHash::from([num as u8; 32]), + StoredHeaderData { number: num, state_root: RelayBlockHash::from([10u8; 32]) }, + ); + } + fn sync_to_relay_header_10() { ParasInfo::::insert( ParaId(1), @@ -244,6 +377,7 @@ mod tests { // when current best finalized is #10 and we're trying to import header#15 => tx is // accepted sync_to_relay_header_10(); + insert_relay_block(15); assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); }); } @@ -260,4 +394,65 @@ mod tests { )); }); } + + #[test] + fn 
extension_rejects_initial_parachain_head_if_missing_relay_chain_header() { + run_test(|| { + // when relay chain header is unknown => "obsolete" + assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())])); + // when relay chain header is known => "ok" + insert_relay_block(10); + assert!(validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())])); + }); + } + + #[test] + fn extension_rejects_free_parachain_head_if_missing_relay_chain_header() { + run_test(|| { + sync_to_relay_header_10(); + // when relay chain header is unknown => "obsolete" + assert!(!validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())])); + // when relay chain header is known => "ok" + insert_relay_block(15); + assert!(validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())])); + }); + } + + #[test] + fn extension_rejects_free_parachain_head_if_no_free_slots_remaining() { + run_test(|| { + // when current best finalized is #10 and we're trying to import header#15 => tx should + // be accepted + sync_to_relay_header_10(); + insert_relay_block(15); + // ... but since we have specified `is_free_execution_expected = true`, it'll be + // rejected + assert!(!validate_free_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); + // ... if we specify `is_free_execution_expected = false`, it'll be accepted + assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); + }); + } + + #[test] + fn extension_rejects_free_parachain_head_if_improves_by_is_below_expected() { + run_test(|| { + // current best finalized is #10; a free submission must improve it by at least + // `FreeHeadersInterval` + sync_to_relay_header_10(); + insert_relay_block(10 + FreeHeadersInterval::get() - 1); + insert_relay_block(10 + FreeHeadersInterval::get()); + // try to submit at 10 + FreeHeadersInterval::get() - 1 => failure + let relay_header = 10 + FreeHeadersInterval::get() - 1; + assert!(!validate_free_submit_parachain_heads( + relay_header, + vec![(ParaId(1), [2u8; 32].into())] + )); + // try to submit at 10 + FreeHeadersInterval::get() => ok + let relay_header = 10 + FreeHeadersInterval::get(); + assert!(validate_free_submit_parachain_heads( + relay_header, + vec![(ParaId(1), [2u8; 32].into())] + )); + }); + } } diff --git a/bridges/modules/parachains/src/lib.rs b/bridges/modules/parachains/src/lib.rs index 1363a637604d..61e04aed3770 100644 --- a/bridges/modules/parachains/src/lib.rs +++ b/bridges/modules/parachains/src/lib.rs @@ -32,6 +32,7 @@ use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHe use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError}; use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound}; +use pallet_bridge_grandpa::SubmitFinalityProofHelper; use sp_std::{marker::PhantomData, vec::Vec}; #[cfg(feature = "runtime-benchmarks")] @@ -92,7 +93,8 @@ pub mod pallet { BoundedStorageValue<<T as Config<I>>::MaxParaHeadDataSize, ParaStoredHeaderData>; /// Weight info of the given parachains pallet. pub type WeightInfoOf<T, I> = <T as Config<I>>::WeightInfo; - type GrandpaPalletOf<T, I> = + /// Bridge GRANDPA pallet that is used to verify parachain proofs. + pub type GrandpaPalletOf<T, I> = pallet_bridge_grandpa::Pallet<T, <T as Config<I>>::BridgesGrandpaPalletInstance>; #[pallet::event] @@ -192,6 +194,21 @@ pub mod pallet { /// /// The GRANDPA pallet instance must be configured to import headers of relay chain that /// we're interested in.
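The conditions for a free parachain head submission are listed in the pallet documentation just below; here is the same decision reduced to a dependency-free predicate, with all parameter names being illustrative:

```rust
// (1) exactly one successfully updated head, (2) the relay block distance reaches the
// configured interval, (3) a free slot is still available in this block.
fn is_free_submission(
	updated_heads: usize,
	improved_by: u32,
	free_headers_interval: u32,
	has_free_header_slots: bool,
) -> bool {
	updated_heads == 1 && improved_by >= free_headers_interval && has_free_header_slots
}

fn main() {
	assert!(is_free_submission(1, 32, 32, true));
	assert!(!is_free_submission(2, 64, 32, true)); // more than one head updated
	assert!(!is_free_submission(1, 16, 32, true)); // interval not reached
	assert!(!is_free_submission(1, 64, 32, false)); // no free slots left
}
```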
+ /// + /// The associated GRANDPA pallet is also used to configure free parachain head + /// submissions. The parachain head submission will be free if: + /// + /// 1) the submission contains exactly one parachain head update that succeeds; + /// + /// 2) the difference between the relay chain block numbers used to prove the new parachain + /// head and the previous best parachain head is larger than the `FreeHeadersInterval`, + /// configured at the associated GRANDPA pallet; + /// + /// 3) there are free submission slots remaining in the block. This is also configured + /// at the associated GRANDPA pallet, using the `MaxFreeHeadersPerBlock` parameter. + /// + /// The first parachain head submission is also free for the submitter, if free submissions + /// are still accepted in this block. type BridgesGrandpaPalletInstance: 'static; /// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged /// chain. @@ -335,10 +352,83 @@ pub mod pallet { at_relay_block: (RelayBlockNumber, RelayBlockHash), parachains: Vec<(ParaId, ParaHash)>, parachain_heads_proof: ParaHeadsProof, + ) -> DispatchResultWithPostInfo { + Self::submit_parachain_heads_ex( + origin, + at_relay_block, + parachains, + parachain_heads_proof, + false, + ) + } + + /// Change `PalletOwner`. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::call_index(1)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult { + <Self as OwnedBridgeModule<_>>::set_owner(origin, new_owner) + } + + /// Halt or resume all pallet operations. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::call_index(2)] + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor<T>, + operating_mode: BasicOperatingMode, + ) -> DispatchResult { + <Self as OwnedBridgeModule<_>>::set_operating_mode(origin, operating_mode) + } + + /// Submit proof of one or several parachain heads. + /// + /// The proof is supposed to be proof of some `Heads` entries from the + /// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain. + /// The proof is supposed to be crafted at the `relay_header_hash` that must already be + /// imported by the corresponding GRANDPA pallet at this chain. + /// + /// The call fails if: + /// + /// - the pallet is halted; + /// + /// - the relay chain block `at_relay_block` is not imported by the associated bridge + /// GRANDPA pallet. + /// + /// The call may succeed, but some heads may not be updated e.g. because the pallet knows + /// a better head or the parachain isn't tracked by the pallet. + /// + /// The `is_free_execution_expected` parameter is not really used inside the call. It is + /// used by the transaction extension, which should be registered at the runtime level. If + /// this parameter is `true`, the transaction will be treated as invalid if the call won't + /// be executed for free. If the transaction extension is not used by the runtime, this + /// parameter is not used at all.
+ #[pallet::call_index(3)] + #[pallet::weight(WeightInfoOf::::submit_parachain_heads_weight( + T::DbWeight::get(), + parachain_heads_proof, + parachains.len() as _, + ))] + pub fn submit_parachain_heads_ex( + origin: OriginFor, + at_relay_block: (RelayBlockNumber, RelayBlockHash), + parachains: Vec<(ParaId, ParaHash)>, + parachain_heads_proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> DispatchResultWithPostInfo { Self::ensure_not_halted().map_err(Error::::BridgeModule)?; ensure_signed(origin)?; + let total_parachains = parachains.len(); + let free_headers_interval = + T::FreeHeadersInterval::get().unwrap_or(RelayBlockNumber::MAX); + // the pallet allows two kind of free submissions + // 1) if distance between all parachain heads is gte than the [`T::FreeHeadersInterval`] + // 2) if all heads are the first heads of their parachains + let mut free_parachain_heads = 0; + // we'll need relay chain header to verify that parachains heads are always increasing. let (relay_block_number, relay_block_hash) = at_relay_block; let relay_block = pallet_bridge_grandpa::ImportedHeaders::< @@ -358,6 +448,7 @@ pub mod pallet { parachains.len() as _, ); + let mut is_updated_something = false; let mut storage = GrandpaPalletOf::::storage_proof_checker( relay_block_hash, parachain_heads_proof.storage_proof, @@ -414,6 +505,7 @@ pub mod pallet { } // convert from parachain head into stored parachain head data + let parachain_head_size = parachain_head.0.len(); let parachain_head_data = match T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) { Some(parachain_head_data) => parachain_head_data, @@ -430,13 +522,30 @@ pub mod pallet { let update_result: Result<_, ()> = ParasInfo::::try_mutate(parachain, |stored_best_head| { + let is_free = parachain_head_size < + T::ParaStoredHeaderDataBuilder::max_free_head_size() as usize && + match stored_best_head { + Some(ref best_head) + if at_relay_block.0.saturating_sub( + best_head.best_head_hash.at_relay_block_number, + ) >= free_headers_interval => + true, + Some(_) => false, + None => true, + }; let artifacts = Pallet::::update_parachain_head( parachain, stored_best_head.take(), - relay_block_number, + HeaderId(relay_block_number, relay_block_hash), parachain_head_data, parachain_head_hash, )?; + + is_updated_something = true; + if is_free { + free_parachain_heads = free_parachain_heads + 1; + } + *stored_best_head = Some(artifacts.best_head); Ok(artifacts.prune_happened) }); @@ -467,28 +576,21 @@ pub mod pallet { Error::::HeaderChainStorageProof(HeaderChainError::StorageProof(e)) })?; - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } + // check if we allow this submission for free + let is_free = total_parachains == 1 + && free_parachain_heads == total_parachains + && SubmitFinalityProofHelper::::has_free_header_slots(); + let pays_fee = if is_free { + log::trace!(target: LOG_TARGET, "Parachain heads update transaction is free"); + pallet_bridge_grandpa::on_free_header_imported::( + ); + Pays::No + } else { + log::trace!(target: LOG_TARGET, "Parachain heads update transaction is paid"); + Pays::Yes + }; - /// Halt or resume all pallet operations. 
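The `submit_parachain_heads` body above forwards to `submit_parachain_heads_ex` with the new flag defaulted to `false`, a common backwards-compatibility pattern when extending a call's signature. A minimal sketch under stand-in types (not the pallet's actual signatures):

```rust
// `Proof` and the result type are placeholders for the pallet's real types.
struct Proof;

fn submit_parachain_heads_ex(proof: Proof, is_free_execution_expected: bool) -> Result<(), ()> {
	// the flag is only inspected by the transaction extension, so the call body
	// may ignore it; real code verifies the proof and updates storage here
	let _ = (proof, is_free_execution_expected);
	Ok(())
}

// the legacy call keeps its old signature and semantics
fn submit_parachain_heads(proof: Proof) -> Result<(), ()> {
	submit_parachain_heads_ex(proof, false)
}

fn main() {
	assert!(submit_parachain_heads(Proof).is_ok());
	assert!(submit_parachain_heads_ex(Proof, true).is_ok());
}
```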
- /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(2)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: BasicOperatingMode, - ) -> DispatchResult { - >::set_operating_mode(origin, operating_mode) + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee }) } } @@ -545,18 +647,20 @@ pub mod pallet { pub(super) fn update_parachain_head( parachain: ParaId, stored_best_head: Option, - new_at_relay_block_number: RelayBlockNumber, + new_at_relay_block: HeaderId, new_head_data: ParaStoredHeaderData, new_head_hash: ParaHash, ) -> Result { // check if head has been already updated at better relay chain block. Without this // check, we may import heads in random order let update = SubmitParachainHeadsInfo { - at_relay_block_number: new_at_relay_block_number, + at_relay_block: new_at_relay_block, para_id: parachain, para_head_hash: new_head_hash, + // doesn't actually matter here + is_free_execution_expected: false, }; - if SubmitParachainHeadsHelper::::is_obsolete(&update) { + if SubmitParachainHeadsHelper::::check_obsolete(&update).is_err() { Self::deposit_event(Event::RejectedObsoleteParachainHead { parachain, parachain_head_hash: new_head_hash, @@ -596,7 +700,7 @@ pub mod pallet { ImportedParaHashes::::try_get(parachain, next_imported_hash_position); let updated_best_para_head = ParaInfo { best_head_hash: BestParaHeadHash { - at_relay_block_number: new_at_relay_block_number, + at_relay_block_number: new_at_relay_block.0, head_hash: new_head_hash, }, next_imported_hash_position: (next_imported_hash_position + 1) % @@ -610,9 +714,10 @@ pub mod pallet { ImportedParaHeads::::insert(parachain, new_head_hash, updated_head_data); log::trace!( target: LOG_TARGET, - "Updated head of parachain {:?} to {}", + "Updated head of parachain {:?} to {} at relay block {}", parachain, new_head_hash, + new_at_relay_block.0, ); // remove old head @@ -696,14 +801,28 @@ impl, I: 'static, C: Parachain> HeaderChain pub fn initialize_for_benchmarks, I: 'static, PC: Parachain>( header: HeaderOf, ) { + use bp_runtime::HeaderIdProvider; + use sp_runtime::traits::Header; + + let relay_head = + pallet_bridge_grandpa::BridgedHeader::::new( + 0, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ); let parachain = ParaId(PC::PARACHAIN_ID); let parachain_head = ParaHead(header.encode()); let updated_head_data = T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) .expect("failed to build stored parachain head in benchmarks"); + pallet_bridge_grandpa::initialize_for_benchmarks::( + relay_head.clone(), + ); Pallet::::update_parachain_head( parachain, None, - 0, + relay_head.id(), updated_head_data, parachain_head.hash(), ) @@ -714,9 +833,9 @@ pub fn initialize_for_benchmarks, I: 'static, PC: Parachain::DbWeight; pub(crate) fn initialize(state_root: RelayBlockHash) -> RelayBlockHash { + pallet_bridge_grandpa::FreeHeadersRemaining::::set(Some(100)); pallet_bridge_grandpa::Pallet::::initialize( RuntimeOrigin::root(), bp_header_chain::InitializationData { @@ -770,10 +891,6 @@ pub(crate) mod tests { num: RelayBlockNumber, state_root: RelayBlockHash, ) -> (ParaHash, GrandpaJustification) { - pallet_bridge_grandpa::Pallet::::on_initialize( - 0, - ); - let header = test_relay_header(num, state_root); let hash = header.hash(); let justification = make_default_justification(&header); @@ -783,6 +900,7 @@ pub(crate) mod tests { 
					Box::new(header),
					justification.clone(),
					TEST_GRANDPA_SET_ID,
+					false,
				)
			);
@@ -908,7 +1026,7 @@ pub(crate) mod tests {
		run_test(|| {
			initialize(state_root);

-			// we're trying to update heads of parachains 1, 2 and 3
+			// we're trying to update heads of parachains 1 and 3
			let expected_weight =
				WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2);
			let result = Pallet::<TestRuntime>::submit_parachain_heads(
@@ -918,9 +1036,10 @@ pub(crate) mod tests {
				proof,
			);
			assert_ok!(result);
+			assert_eq!(result.expect("checked above").pays_fee, Pays::Yes);
			assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight));

-			// but only 1 and 2 are updated, because proof is missing head of parachain#2
+			// 1 and 3 are updated, because proof is missing head of parachain#2
			assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(1)), Some(initial_best_head(1)));
			assert_eq!(ParasInfo::<TestRuntime>::get(ParaId(2)), None);
			assert_eq!(
@@ -989,7 +1108,9 @@ pub(crate) mod tests {
		run_test(|| {
			// start with relay block #0 and import head#5 of parachain#1
			initialize(state_root_5);
-			assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5));
+			let result = import_parachain_1_head(0, state_root_5, parachains_5, proof_5);
+			// first parachain head is imported for free
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
			assert_eq!(
				ParasInfo::<TestRuntime>::get(ParaId(1)),
				Some(ParaInfo {
@@ -1024,7 +1145,9 @@ pub(crate) mod tests {

			// import head#10 of parachain#1 at relay block #1
			let (relay_1_hash, justification) = proceed(1, state_root_10);
-			assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10));
+			let result = import_parachain_1_head(1, state_root_10, parachains_10, proof_10);
+			// second parachain head is imported for a fee
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
			assert_eq!(
				ParasInfo::<TestRuntime>::get(ParaId(1)),
				Some(ParaInfo {
@@ -1647,4 +1770,143 @@ pub(crate) mod tests {
			);
		})
	}
+
+	#[test]
+	fn may_be_free_for_submitting_filtered_heads() {
+		run_test(|| {
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 5))]);
+			// start with relay block #0 and import head#5 of parachain#2
+			initialize(state_root);
+			// first submission is free
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(0, test_relay_header(0, state_root).hash()),
+				parachains.clone(),
+				proof.clone(),
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+			// next submission is NOT free, because we haven't updated anything
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(0, test_relay_header(0, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// then we submit new head, proved at relay block `FreeHeadersInterval - 1` => Pays::Yes
+			let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+				RegularParachainHeader,
+			>(vec![(2, head_data(2, 50))]);
+			let relay_block_number = FreeHeadersInterval::get() - 1;
+			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// then we submit new head, proved after `FreeHeadersInterval` => Pays::No
+			let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+				RegularParachainHeader,
+			>(vec![(2, head_data(2, 100))]);
+			let relay_block_number = relay_block_number + FreeHeadersInterval::get();
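+			// editor's note (assumption): moving the relay chain forward by a full
+			// `FreeHeadersInterval` is what makes the next submission free again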
			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::No);
+			// then we submit new BIG head, proved after `FreeHeadersInterval` => Pays::Yes
+			let mut large_head = head_data(2, 100);
+			large_head.0.extend(&[42u8; BigParachain::MAX_HEADER_SIZE as _]);
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, large_head)]);
+			let relay_block_number = relay_block_number + FreeHeadersInterval::get();
+			proceed(relay_block_number, state_root);
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+		})
+	}
+
+	#[test]
+	fn grandpa_and_parachain_pallets_share_free_headers_counter() {
+		run_test(|| {
+			initialize(Default::default());
+			// set free headers limit to `4`
+			let mut free_headers_remaining = 4;
+			pallet_bridge_grandpa::FreeHeadersRemaining::<TestRuntime, BridgesGrandpaPalletInstance>::set(
+				Some(free_headers_remaining),
+			);
+			// import free GRANDPA and parachain headers
+			let mut relay_block_number = 0;
+			for i in 0..2 {
+				// import free GRANDPA header
+				let (state_root, proof, parachains) = prepare_parachain_heads_proof::<
+					RegularParachainHeader,
+				>(vec![(2, head_data(2, 5 + i))]);
+				relay_block_number = relay_block_number + FreeHeadersInterval::get();
+				proceed(relay_block_number, state_root);
+				assert_eq!(
+					pallet_bridge_grandpa::FreeHeadersRemaining::<
+						TestRuntime,
+						BridgesGrandpaPalletInstance,
+					>::get(),
+					Some(free_headers_remaining - 1),
+				);
+				free_headers_remaining = free_headers_remaining - 1;
+				// import free parachain header
+				assert_ok!(Pallet::<TestRuntime>::submit_parachain_heads(
+					RuntimeOrigin::signed(1),
+					(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+					parachains,
+					proof,
+				));
+				assert_eq!(
+					pallet_bridge_grandpa::FreeHeadersRemaining::<
+						TestRuntime,
+						BridgesGrandpaPalletInstance,
+					>::get(),
+					Some(free_headers_remaining - 1),
+				);
+				free_headers_remaining = free_headers_remaining - 1;
+			}
+			// try to import free GRANDPA header => non-free execution
+			let (state_root, proof, parachains) =
+				prepare_parachain_heads_proof::<RegularParachainHeader>(vec![(2, head_data(2, 7))]);
+			relay_block_number = relay_block_number + FreeHeadersInterval::get();
+			let result = pallet_bridge_grandpa::Pallet::<TestRuntime, BridgesGrandpaPalletInstance>::submit_finality_proof_ex(
+				RuntimeOrigin::signed(1),
+				Box::new(test_relay_header(relay_block_number, state_root)),
+				make_default_justification(&test_relay_header(relay_block_number, state_root)),
+				TEST_GRANDPA_SET_ID,
+				false,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			// try to import free parachain header => non-free execution
+			let result = Pallet::<TestRuntime>::submit_parachain_heads(
+				RuntimeOrigin::signed(1),
+				(relay_block_number, test_relay_header(relay_block_number, state_root).hash()),
+				parachains,
+				proof,
+			);
+			assert_eq!(result.unwrap().pays_fee, Pays::Yes);
+			assert_eq!(
+				pallet_bridge_grandpa::FreeHeadersRemaining::<
+					TestRuntime,
+					BridgesGrandpaPalletInstance,
+				>::get(),
+				Some(0),
+			);
+		});
+	}
 }
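Editor's note: the last test above exercises the per-block budget that both pallets share. A simplified model (the storage-item shape here is an assumption; the real item lives in `pallet_bridge_grandpa`) of the counter semantics:

```rust
// Simplified model of the shared per-block budget: each free GRANDPA header and
// each free parachain-head submission consumes one slot from the same counter.
struct FreeHeadersRemaining(Option<u32>);

impl FreeHeadersRemaining {
	fn has_free_header_slots(&self) -> bool {
		matches!(self.0, Some(remaining) if remaining > 0)
	}

	fn on_free_header_imported(&mut self) {
		if let Some(remaining) = self.0.as_mut() {
			*remaining = remaining.saturating_sub(1);
		}
	}
}
```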
diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs
index d9cbabf850ec..dbb62845392d 100644
--- a/bridges/modules/parachains/src/mock.rs
+++ b/bridges/modules/parachains/src/mock.rs
@@ -70,6 +70,7 @@ impl Chain for Parachain1 {

 impl Parachain for Parachain1 {
 	const PARACHAIN_ID: u32 = 1;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 pub struct Parachain2;
@@ -96,6 +97,7 @@ impl Chain for Parachain2 {

 impl Parachain for Parachain2 {
 	const PARACHAIN_ID: u32 = 2;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 pub struct Parachain3;
@@ -122,6 +124,7 @@ impl Chain for Parachain3 {

 impl Parachain for Parachain3 {
 	const PARACHAIN_ID: u32 = 3;
+	const MAX_HEADER_SIZE: u32 = 1_024;
 }

 // this parachain is using u128 as block number and stored head data size exceeds limit
@@ -149,6 +152,7 @@ impl Chain for BigParachain {

 impl Parachain for BigParachain {
 	const PARACHAIN_ID: u32 = 4;
+	const MAX_HEADER_SIZE: u32 = 2_048;
 }

 construct_runtime! {
@@ -168,12 +172,14 @@ impl frame_system::Config for TestRuntime {

 parameter_types! {
 	pub const HeadersToKeep: u32 = 5;
+	pub const FreeHeadersInterval: u32 = 15;
 }

 impl pallet_bridge_grandpa::Config for TestRuntime {
 	type RuntimeEvent = RuntimeEvent;
 	type BridgedChain = TestBridgedChain;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<2>;
+	type MaxFreeHeadersPerBlock = ConstU32<2>;
+	type FreeHeadersInterval = FreeHeadersInterval;
 	type HeadersToKeep = HeadersToKeep;
 	type WeightInfo = ();
 }
@@ -181,7 +187,8 @@ impl pallet_bridge_grandpa::Config for TestRun
 impl pallet_bridge_grandpa::Config for TestRuntime {
 	type RuntimeEvent = RuntimeEvent;
 	type BridgedChain = TestBridgedChain;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<2>;
+	type MaxFreeHeadersPerBlock = ConstU32<2>;
+	type FreeHeadersInterval = FreeHeadersInterval;
 	type HeadersToKeep = HeadersToKeep;
 	type WeightInfo = ();
 }
diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs
index 393086a85690..64dad625de08 100644
--- a/bridges/modules/parachains/src/weights_ext.rs
+++ b/bridges/modules/parachains/src/weights_ext.rs
@@ -36,6 +36,20 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;

 /// Extended weight info.
 pub trait WeightInfoExt: WeightInfo {
+	// Our configuration assumes that the runtime has special signed extensions used to:
+	//
+	// 1) boost priority of `submit_parachain_heads` transactions;
+	//
+	// 2) slash the relayer if they submit an invalid transaction.
+	//
+	// We read and update storage values of other pallets (`pallet-bridge-relayers` and the
+	// balances/assets pallet), so we need to add this weight to the weight of our call.
+	// Hence the two following methods.
+
+	/// Extra weight that is added to the `submit_parachain_heads` call weight by signed
+	/// extensions that are declared at runtime level.
+	fn submit_parachain_heads_overhead_from_runtime() -> Weight;
+
	/// Storage proof overhead, that is included in every storage proof.
	///
	/// The relayer would pay some extra fee for additional proof bytes, since they mean
@@ -65,7 +79,10 @@ pub trait WeightInfoExt: WeightInfo {
		let pruning_weight =
			Self::parachain_head_pruning_weight(db_weight).saturating_mul(parachains_count as u64);

-		base_weight.saturating_add(proof_size_overhead).saturating_add(pruning_weight)
+		base_weight
+			.saturating_add(proof_size_overhead)
+			.saturating_add(pruning_weight)
+			.saturating_add(Self::submit_parachain_heads_overhead_from_runtime())
	}

	/// Returns weight of single parachain head storage update.
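Editor's note: to make the weight composition above concrete, a small sketch with made-up placeholder numbers (nothing here is a benchmarked value):

```rust
use frame_support::weights::Weight;

// The effective `submit_parachain_heads` weight is the benchmarked base weight,
// plus the storage-proof size overhead, the per-parachain pruning weight and the
// new runtime-level signed-extension overhead declared above.
fn effective_submit_weight(parachains_count: u64) -> Weight {
	let base = Weight::from_parts(1_000_000, 10_000); // placeholder benchmark result
	let proof_size_overhead = Weight::from_parts(200_000, 1_024); // placeholder
	let pruning = Weight::from_parts(50_000, 0).saturating_mul(parachains_count);
	let runtime_overhead = Weight::from_parts(300_000, 2_048); // e.g. slashing weight
	base.saturating_add(proof_size_overhead)
		.saturating_add(pruning)
		.saturating_add(runtime_overhead)
}
```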
@@ -95,12 +112,20 @@ pub trait WeightInfoExt: WeightInfo {
 }

 impl WeightInfoExt for () {
+	fn submit_parachain_heads_overhead_from_runtime() -> Weight {
+		Weight::zero()
+	}
+
	fn expected_extra_storage_proof_size() -> u32 {
		EXTRA_STORAGE_PROOF_SIZE
	}
 }

 impl<T: frame_system::Config> WeightInfoExt for BridgeWeight<T> {
+	fn submit_parachain_heads_overhead_from_runtime() -> Weight {
+		Weight::zero()
+	}
+
	fn expected_extra_storage_proof_size() -> u32 {
		EXTRA_STORAGE_PROOF_SIZE
	}
diff --git a/bridges/primitives/parachains/src/lib.rs b/bridges/primitives/parachains/src/lib.rs
index 692bbd99ecef..142c6e9b0892 100644
--- a/bridges/primitives/parachains/src/lib.rs
+++ b/bridges/primitives/parachains/src/lib.rs
@@ -116,6 +116,10 @@ impl ParaStoredHeaderData {

 /// Stored parachain head data builder.
 pub trait ParaStoredHeaderDataBuilder {
+	/// Maximal parachain head size that we may accept for free. All heads above
+	/// this limit are submitted for a regular fee.
+	fn max_free_head_size() -> u32;
+
	/// Return number of parachains that are supported by this builder.
	fn supported_parachains() -> u32;

@@ -127,6 +131,10 @@ pub trait ParaStoredHeaderDataBuilder {
 pub struct SingleParaStoredHeaderDataBuilder<C: Parachain>(PhantomData<C>);

 impl<C: Parachain> ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBuilder<C> {
+	fn max_free_head_size() -> u32 {
+		C::MAX_HEADER_SIZE
+	}
+
	fn supported_parachains() -> u32 {
		1
	}
@@ -147,6 +155,17 @@ impl ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBui
 #[impl_trait_for_tuples::impl_for_tuples(1, 30)]
 #[tuple_types_custom_trait_bound(Parachain)]
 impl ParaStoredHeaderDataBuilder for C {
+	fn max_free_head_size() -> u32 {
+		let mut result = 0_u32;
+		for_tuples!( #(
+			result = sp_std::cmp::max(
+				result,
+				SingleParaStoredHeaderDataBuilder::<C>::max_free_head_size(),
+			);
+		)* );
+		result
+	}
+
	fn supported_parachains() -> u32 {
		let mut result = 0;
		for_tuples!( #(
diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs
index 4ec5a001a99e..1b1c623104f9 100644
--- a/bridges/primitives/runtime/src/chain.rs
+++ b/bridges/primitives/runtime/src/chain.rs
@@ -236,6 +236,12 @@ where
 pub trait Parachain: Chain {
	/// Parachain identifier.
	const PARACHAIN_ID: u32;
+	/// Maximal size of the parachain header.
+	///
+	/// This isn't a strict limit. The relayer may submit larger headers and the
+	/// pallet will accept the call. The limit is only used to compute whether
+	/// the refund can be made.
+	const MAX_HEADER_SIZE: u32;
 }

 impl<T> Parachain for T
@@ -244,6 +250,8 @@ where
	<T as UnderlyingChainProvider>::Chain: Parachain,
 {
	const PARACHAIN_ID: u32 = <<T as UnderlyingChainProvider>::Chain as Parachain>::PARACHAIN_ID;
+	const MAX_HEADER_SIZE: u32 =
+		<<T as UnderlyingChainProvider>::Chain as Parachain>::MAX_HEADER_SIZE;
 }

 /// Adapter for `Get` to access `PARACHAIN_ID` from `trait Parachain`
@@ -306,6 +314,11 @@ macro_rules! decl_bridge_finality_runtime_apis {
				pub const [<BEST_FINALIZED_ $chain:upper _HEADER_METHOD>]: &str =
					stringify!([<$chain:camel FinalityApi_best_finalized>]);

+				/// Name of the `FinalityApi::free_headers_interval` runtime method.
+				pub const [<FREE_HEADERS_INTERVAL_FOR_ $chain:upper _METHOD>]: &str =
+					stringify!([<$chain:camel FinalityApi_free_headers_interval>]);
+
				$(
					/// Name of the `FinalityApi::accepted_<consensus>_finality_proofs`
					/// runtime method.
					pub const []: &str =
@@ -322,6 +335,13 @@ macro_rules! decl_bridge_finality_runtime_apis {
					/// Returns number and hash of the best finalized header known to the bridge module.
					fn best_finalized() -> Option<HeaderId<Hash, Number>>;

+					/// Returns the free headers interval, if it is configured in the runtime.
+					/// The caller expects that if their transaction improves the best known header
+					/// by at least the `free_headers_interval`, it will be fee-free.
+ /// + /// See [`pallet_bridge_grandpa::Config::FreeHeadersInterval`] for details. + fn free_headers_interval() -> Option; + $( /// Returns the justifications accepted in the current block. fn []( diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index 77240d15884f..d1203a2c58ea 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -110,6 +110,7 @@ impl bp_runtime::Chain for TestParachainBase { impl bp_runtime::Parachain for TestParachainBase { const PARACHAIN_ID: u32 = 1000; + const MAX_HEADER_SIZE: u32 = 1_024; } /// Parachain that may be used in tests. diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml index 52271f944213..f59f689bf6b5 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml @@ -40,7 +40,7 @@ cumulus_based = true rpc_port = 8933 ws_port = 8943 args = [ - "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] # run bob as parachain collator @@ -51,7 +51,7 @@ cumulus_based = true rpc_port = 8934 ws_port = 8944 args = [ - "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] [[parachains]] @@ -65,14 +65,14 @@ cumulus_based = true ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] [[parachains.collators]] name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] #[[hrmp_channels]] diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml index f2550bcc9959..6ab03ad5fe2c 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml @@ -40,7 +40,7 @@ cumulus_based = true rpc_port = 8935 ws_port = 8945 args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] # run bob as parachain collator @@ -51,7 +51,7 @@ cumulus_based = true rpc_port = 8936 ws_port = 8946 args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace" + "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] [[parachains]] @@ -65,14 +65,14 @@ cumulus_based = true ws_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + 
"-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" + "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" ] #[[hrmp_channels]] diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index 41aa862be576..2f11692d97b9 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -169,12 +169,107 @@ function run_relay() { --lane "${LANE_ID}" } +function run_finality_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-headers rococo-to-bridge-hub-westend \ + --only-free-headers \ + --source-host localhost \ + --source-port 9942 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Charlie \ + --target-transactions-mortality 4& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-headers westend-to-bridge-hub-rococo \ + --only-free-headers \ + --source-host localhost \ + --source-port 9945 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Charlie \ + --target-transactions-mortality 4 +} + +function run_parachains_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-parachains rococo-to-bridge-hub-westend \ + --only-free-headers \ + --source-host localhost \ + --source-port 9942 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Dave \ + --target-transactions-mortality 4& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-parachains westend-to-bridge-hub-rococo \ + --only-free-headers \ + --source-host localhost \ + --source-port 9945 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Dave \ + --target-transactions-mortality 4 +} + +function run_messages_relay() { + local relayer_path=$(ensure_relayer) + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-messages bridge-hub-rococo-to-bridge-hub-westend \ + --source-host localhost \ + --source-port 8943 \ + --source-version-mode Auto \ + --source-signer //Eve \ + --source-transactions-mortality 4 \ + --target-host localhost \ + --target-port 8945 \ + --target-version-mode Auto \ + --target-signer //Eve \ + --target-transactions-mortality 4 \ + --lane $LANE_ID& + + RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ + $relayer_path relay-messages bridge-hub-westend-to-bridge-hub-rococo \ + --source-host localhost \ + --source-port 8945 \ + --source-version-mode Auto \ + --source-signer //Ferdie \ + --source-transactions-mortality 4 \ + --target-host localhost \ + --target-port 8943 \ + --target-version-mode Auto \ + --target-signer //Ferdie \ + --target-transactions-mortality 4 \ + --lane $LANE_ID +} + case "$1" in run-relay) init_wnd_ro init_ro_wnd run_relay ;; + run-finality-relay) + init_wnd_ro + init_ro_wnd + run_finality_relay + ;; + run-parachains-relay) + run_parachains_relay + ;; + run-messages-relay) + run_messages_relay + ;; init-asset-hub-rococo-local) ensure_polkadot_js_api # create foreign assets for native Westend token (governance 
call on Rococo)
@@ -386,6 +481,9 @@ case "$1" in
		echo "A command is require. Supported commands for:
		Local (zombienet) run:
		- run-relay
+		- run-finality-relay
+		- run-parachains-relay
+		- run-messages-relay
		- init-asset-hub-rococo-local
		- init-bridge-hub-rococo-local
		- init-asset-hub-westend-local
diff --git a/bridges/testing/environments/rococo-westend/explorers.sh b/bridges/testing/environments/rococo-westend/explorers.sh
new file mode 100755
index 000000000000..fb137726c93c
--- /dev/null
+++ b/bridges/testing/environments/rococo-westend/explorers.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Rococo AH
+xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer&
+# Rococo BH
+xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer&
+
+# Westend BH
+xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer&
+# Westend AH
+xdg-open https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer&
diff --git a/bridges/testing/environments/rococo-westend/helper.sh b/bridges/testing/environments/rococo-westend/helper.sh
index 0a13ded213f5..571c78fea584 100755
--- a/bridges/testing/environments/rococo-westend/helper.sh
+++ b/bridges/testing/environments/rococo-westend/helper.sh
@@ -1,3 +1,9 @@
 #!/bin/bash

-$ENV_PATH/bridges_rococo_westend.sh "$@"
+if [ "$1" == "auto-log" ]; then
+	shift # ignore "auto-log"
+	log_name=$1
+	$ENV_PATH/bridges_rococo_westend.sh "$@" >$TEST_DIR/logs/$log_name.log
+else
+	$ENV_PATH/bridges_rococo_westend.sh "$@"
+fi
diff --git a/bridges/testing/environments/rococo-westend/spawn.sh b/bridges/testing/environments/rococo-westend/spawn.sh
index cbd0b1bc623a..a0ab00be1444 100755
--- a/bridges/testing/environments/rococo-westend/spawn.sh
+++ b/bridges/testing/environments/rococo-westend/spawn.sh
@@ -59,12 +59,12 @@ if [[ $init -eq 1 ]]; then
 fi

 if [[ $start_relayer -eq 1 ]]; then
-	${BASH_SOURCE%/*}/start_relayer.sh $rococo_dir $westend_dir relayer_pid
+	${BASH_SOURCE%/*}/start_relayer.sh $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid
 fi

 echo $rococo_dir > $TEST_DIR/rococo.env
 echo $westend_dir > $TEST_DIR/westend.env
 echo

-wait -n $rococo_pid $westend_pid $relayer_pid
+wait -n $rococo_pid $westend_pid $finality_relayer_pid $parachains_relayer_pid $messages_relayer_pid
 kill -9 -$$
diff --git a/bridges/testing/environments/rococo-westend/start_relayer.sh b/bridges/testing/environments/rococo-westend/start_relayer.sh
index 7ddd312d395a..9c57e4a6ab6e 100755
--- a/bridges/testing/environments/rococo-westend/start_relayer.sh
+++ b/bridges/testing/environments/rococo-westend/start_relayer.sh
@@ -7,17 +7,31 @@ source "$FRAMEWORK_PATH/utils/zombienet.sh"

 rococo_dir=$1
 westend_dir=$2
-__relayer_pid=$3
+__finality_relayer_pid=$3
+__parachains_relayer_pid=$4
+__messages_relayer_pid=$5

 logs_dir=$TEST_DIR/logs
 helper_script="${BASH_SOURCE%/*}/helper.sh"

-relayer_log=$logs_dir/relayer.log
-echo -e "Starting rococo-westend relayer. Logs available at: $relayer_log\n"
-start_background_process "$helper_script run-relay" $relayer_log relayer_pid
+# start finality relayer
+finality_relayer_log=$logs_dir/relayer_finality.log
+echo -e "Starting rococo-westend finality relayer. Logs available at: $finality_relayer_log\n"
+start_background_process "$helper_script run-finality-relay" $finality_relayer_log finality_relayer_pid
+
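+# editor's note: the single relayer process is split into finality, parachains and
+# messages relayers; per the balance tests added by this patch, the finality and
+# parachains relayers run with `--only-free-headers` and should pay no fees, while
+# the messages relayer pays regular fees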
+# start parachains relayer
+parachains_relayer_log=$logs_dir/relayer_parachains.log
+echo -e "Starting rococo-westend parachains relayer. Logs available at: $parachains_relayer_log\n"
+start_background_process "$helper_script run-parachains-relay" $parachains_relayer_log parachains_relayer_pid
+
+# start messages relayer
+messages_relayer_log=$logs_dir/relayer_messages.log
+echo -e "Starting rococo-westend messages relayer. Logs available at: $messages_relayer_log\n"
+start_background_process "$helper_script run-messages-relay" $messages_relayer_log messages_relayer_pid

 run_zndsl ${BASH_SOURCE%/*}/rococo.zndsl $rococo_dir
 run_zndsl ${BASH_SOURCE%/*}/westend.zndsl $westend_dir

-eval $__relayer_pid="'$relayer_pid'"
-
+eval $__finality_relayer_pid="'$finality_relayer_pid'"
+eval $__parachains_relayer_pid="'$parachains_relayer_pid'"
+eval $__messages_relayer_pid="'$messages_relayer_pid'"
diff --git a/bridges/testing/framework/js-helpers/native-asset-balance.js b/bridges/testing/framework/js-helpers/native-asset-balance.js
new file mode 100644
index 000000000000..4869eba35d8d
--- /dev/null
+++ b/bridges/testing/framework/js-helpers/native-asset-balance.js
@@ -0,0 +1,12 @@
+async function run(nodeName, networkInfo, args) {
+    const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName];
+    const api = await zombie.connect(wsUri, userDefinedTypes);
+
+    const accountAddress = args[0];
+    const accountData = await api.query.system.account(accountAddress);
+    const accountBalance = accountData.data['free'];
+    console.log("Balance of " + accountAddress + ": " + accountBalance);
+    return accountBalance;
+}
+
+module.exports = {run}
diff --git a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl
index cdb7d28e940c..6e26632fd9f9 100644
--- a/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl
+++ b/bridges/testing/tests/0001-asset-transfer/roc-reaches-westend.zndsl
@@ -3,10 +3,10 @@ Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
 Creds: config

 # send 5 ROC to //Alice from Rococo AH to Westend AH
-asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds
+asset-hub-westend-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-rococo-local 5000000000000" within 120 seconds

 # check that //Alice received at least 4.8 ROC on Westend AH
 asset-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Rococo" within 600 seconds

-# check that the relayer //Charlie is rewarded by Westend AH
-bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 30 seconds
+# relayer //Ferdie is rewarded for delivering messages from Rococo BH
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5HGjWAeFDfFCWPsjFQdVV2Msvz2XtMktvgocEZcCj68kUMaw,0x00000002,0x6268726F,ThisChain,0" within 300 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl b/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl
new file mode 100644
index 000000000000..4839c19c0ff2
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/roc-relayer-balance-does-not-change.zndsl
@@ -0,0 +1,11 @@
+Description: Finality and parachain relays should have a constant balance, because their transactions are free
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
+Creds: config
+
+# the local chain spec gives `1u64 << 60` tokens to every endowed account: if that ever
+# changes, this value needs to be updated here as well
+
+# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds
+# //Dave only submits free parachain headers, so the balance should stay the same
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/run.sh b/bridges/testing/tests/0001-asset-transfer/run.sh
index a7bb122919b4..227069932f2d 100755
--- a/bridges/testing/tests/0001-asset-transfer/run.sh
+++ b/bridges/testing/tests/0001-asset-transfer/run.sh
@@ -18,8 +18,14 @@ ensure_process_file $env_pid $TEST_DIR/westend.env 300
 westend_dir=`cat $TEST_DIR/westend.env`
 echo

+run_zndsl ${BASH_SOURCE%/*}/roc-relayer-balance-does-not-change.zndsl $rococo_dir
+run_zndsl ${BASH_SOURCE%/*}/wnd-relayer-balance-does-not-change.zndsl $westend_dir
+
 run_zndsl ${BASH_SOURCE%/*}/roc-reaches-westend.zndsl $westend_dir
 run_zndsl ${BASH_SOURCE%/*}/wnd-reaches-rococo.zndsl $rococo_dir
 run_zndsl ${BASH_SOURCE%/*}/wroc-reaches-rococo.zndsl $rococo_dir
 run_zndsl ${BASH_SOURCE%/*}/wwnd-reaches-westend.zndsl $westend_dir
+
+run_zndsl ${BASH_SOURCE%/*}/roc-relayer-balance-does-not-change.zndsl $rococo_dir
+run_zndsl ${BASH_SOURCE%/*}/wnd-relayer-balance-does-not-change.zndsl $westend_dir
diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl
index dbc03864e2b6..5a8d6dabc20e 100644
--- a/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl
+++ b/bridges/testing/tests/0001-asset-transfer/wnd-reaches-rococo.zndsl
@@ -3,10 +3,10 @@ Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml
 Creds: config

 # send 5 WND to //Alice from Westend AH to Rococo AH
-asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds
+asset-hub-rococo-collator1: run {{ENV_PATH}}/helper.sh with "auto-log reserve-transfer-assets-from-asset-hub-westend-local 5000000000000" within 120 seconds

 # check that //Alice received at least 4.8 WND on Rococo AH
 asset-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,4800000000000,Westend" within 600 seconds

-# check that the relayer //Charlie is rewarded by Rococo AH
-bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 30 seconds
+# relayer //Eve is rewarded for delivering messages from Westend BH
+bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/relayer-rewards.js with "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL,0x00000002,0x62687764,ThisChain,0" within 300 seconds
diff --git a/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl b/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl
new file mode 100644
index 000000000000..d2563e180786
--- /dev/null
+++ b/bridges/testing/tests/0001-asset-transfer/wnd-relayer-balance-does-not-change.zndsl
@@ -0,0 +1,11 @@
+Description: Finality and parachain relays should have a constant balance, because their transactions are free
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml
+Creds: config
+
+# the local chain spec gives `1u64 << 60` tokens to every endowed account: if that ever
+# changes, this value needs to be updated here as well
+
+# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds
+# //Dave only submits free parachain headers, so the balance should stay the same
+bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
index f5a75aa03acd..574406ab305f 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml
@@ -22,6 +22,7 @@ scale-info = { version = "2.11.1", default-features = false, features = [
	"derive",
 ] }
 serde = { optional = true, features = ["derive"], workspace = true, default-features = true }
+tuplex = { version = "0.1", default-features = false }

 # Substrate
 frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true }
@@ -218,6 +219,7 @@ std = [
	"sp-version/std",
	"substrate-wasm-builder",
	"testnet-parachains-constants/std",
+	"tuplex/std",
	"xcm-builder/std",
	"xcm-executor/std",
	"xcm/std",
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs
index 93ef9470363c..5551b05e2025 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs
@@ -49,7 +49,8 @@ pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3;
 impl pallet_bridge_grandpa::Config<BridgeGrandpaWestendInstance> for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type BridgedChain = bp_westend::Westend;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>;
+	type MaxFreeHeadersPerBlock = ConstU32<4>;
+	type FreeHeadersInterval = ConstU32<5>;
	type HeadersToKeep = RelayChainHeadersToKeep;
	type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo<Runtime>;
 }
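Editor's note: every GRANDPA pallet instance in this patch now carries the same two knobs. A minimal sketch (editor's illustration; the values `4` and `5` mirror the configs in this diff, the predicate itself is an assumption about their combined meaning):

```rust
use frame_support::traits::{ConstU32, Get};

// `FreeHeadersInterval` is the minimal improvement (in relay blocks) that makes a
// header submission eligible for free execution; `MaxFreeHeadersPerBlock` caps how
// many free submissions a single block accepts.
type FreeHeadersInterval = ConstU32<5>;
type MaxFreeHeadersPerBlock = ConstU32<4>;

fn may_be_free(improved_by: u32, free_submissions_this_block: u32) -> bool {
	improved_by >= FreeHeadersInterval::get() &&
		free_submissions_this_block < MaxFreeHeadersPerBlock::get()
}
```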
@@ -89,7 +90,8 @@ pub type BridgeGrandpaRococoBulletinInstance = pallet_bridge_grandpa::Instance4;
 impl pallet_bridge_grandpa::Config<BridgeGrandpaRococoBulletinInstance> for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type BridgedChain = bp_polkadot_bulletin::PolkadotBulletin;
-	type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>;
+	type MaxFreeHeadersPerBlock = ConstU32<4>;
+	type FreeHeadersInterval = ConstU32<5>;
	type HeadersToKeep = RelayChainHeadersToKeep;
	// Technically this is incorrect - we have two pallet instances and ideally we shall
	// benchmark every instance separately. But the benchmarking engine has a flaw - it
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
index 8845f0538b5c..94b936889b77 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_bulletin_config.rs
@@ -20,17 +20,15 @@
 //! are reusing Polkadot Bulletin chain primitives everywhere here.

 use crate::{
-	bridge_common_config::{BridgeGrandpaRococoBulletinInstance, BridgeHubRococo},
-	weights,
-	xcm_config::UniversalLocation,
-	AccountId, BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime,
-	RuntimeEvent, XcmOverRococoBulletin, XcmRouter,
+	bridge_common_config::BridgeHubRococo, weights, xcm_config::UniversalLocation, AccountId,
+	BridgeRococoBulletinGrandpa, BridgeRococoBulletinMessages, PolkadotXcm, Runtime, RuntimeEvent,
+	XcmOverRococoBulletin, XcmRouter,
 };
 use bp_messages::LaneId;
 use bp_runtime::Chain;
 use bridge_runtime_common::{
	extensions::refund_relayer_extension::{
-		ActualFeeRefund, RefundBridgedGrandpaMessages, RefundSignedExtensionAdapter,
+		ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter,
		RefundableMessagesLane,
	},
	messages,
@@ -83,6 +81,9 @@ parameter_types! {
	pub const RococoPeopleToRococoBulletinMessagesLane: bp_messages::LaneId =
		XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN;

+	// see the `FEE_BOOST_PER_RELAY_HEADER` constant to get the meaning of this value
+	pub PriorityBoostPerRelayHeader: u64 = 58_014_163_614_163;
+
	/// Priority boost that the registered relayer receives for every additional message in the message
	/// delivery transaction.
	///
@@ -169,9 +170,8 @@ impl messages::BridgedChainWithMessages for RococoBulletin {}
 /// Signed extension that refunds relayers that are delivering messages from the Rococo Bulletin
 /// chain.
 pub type OnBridgeHubRococoRefundRococoBulletinMessages = RefundSignedExtensionAdapter<
-	RefundBridgedGrandpaMessages<
+	RefundBridgedMessages<
		Runtime,
-		BridgeGrandpaRococoBulletinInstance,
		RefundableMessagesLane<
			WithRococoBulletinMessagesInstance,
			RococoPeopleToRococoBulletinMessagesLane,
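Editor's note: each runtime pairs its `PriorityBoostPer*` constant with a `FEE_BOOST_PER_*` sanity check below. A minimal sketch (the helper and base priority are hypothetical; only the constant's value comes from this diff) of what the boost means for transaction ordering:

```rust
// A header-submission transaction gets the configured boost added to its base
// priority, so useful bridge transactions outrank unrelated ones in the pool.
const PRIORITY_BOOST_PER_RELAY_HEADER: u64 = 58_014_163_614_163;

fn boosted_priority(base_priority: u64) -> u64 {
	base_priority.saturating_add(PRIORITY_BOOST_PER_RELAY_HEADER)
}
```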
@@ -244,6 +244,9 @@ mod tests {
	/// operational costs and a faster bridge), so this value should be significant.
	const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS;

+	// see `FEE_BOOST_PER_MESSAGE` comment
+	const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * rococo::currency::UNITS;
+
	#[test]
	fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() {
		check_message_lane_weights::<
@@ -273,7 +276,13 @@ mod tests {
		// Bulletin chain - it has the same (almost) runtime for Polkadot Bulletin and Rococo
		// Bulletin, so we have to adhere Polkadot names here

-		bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::<
+		bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::<
+			Runtime,
+			BridgeGrandpaRococoBulletinInstance,
+			PriorityBoostPerRelayHeader,
+		>(FEE_BOOST_PER_RELAY_HEADER);
+
+		bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::<
			Runtime,
			WithRococoBulletinMessagesInstance,
			PriorityBoostPerMessage,
diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
index e5a00073407f..1681ac7f4687 100644
--- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
+++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs
@@ -29,8 +29,8 @@ use bp_messages::LaneId;
 use bp_runtime::Chain;
 use bridge_runtime_common::{
	extensions::refund_relayer_extension::{
-		ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter,
-		RefundableMessagesLane, RefundableParachain,
+		ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter,
+		RefundableMessagesLane,
	},
	messages,
	messages::{
@@ -65,6 +65,10 @@ parameter_types! {
		2,
		[GlobalConsensus(WestendGlobalConsensusNetwork::get())]
	);
+	// see the `FEE_BOOST_PER_RELAY_HEADER` constant to get the meaning of this value
+	pub PriorityBoostPerRelayHeader: u64 = 32_007_814_407_814;
+	// see the `FEE_BOOST_PER_PARACHAIN_HEADER` constant to get the meaning of this value
+	pub PriorityBoostPerParachainHeader: u64 = 1_396_340_903_540_903;
	// see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value
	pub PriorityBoostPerMessage: u64 = 182_044_444_444_444;

@@ -174,12 +178,8 @@ impl messages::BridgedChainWithMessages for BridgeHubWestend {}

 /// Signed extension that refunds relayers that are delivering messages from the Westend parachain.
 pub type OnBridgeHubRococoRefundBridgeHubWestendMessages = RefundSignedExtensionAdapter<
-	RefundBridgedParachainMessages<
+	RefundBridgedMessages<
		Runtime,
-		RefundableParachain<
-			BridgeParachainWestendInstance,
-			bp_bridge_hub_westend::BridgeHubWestend,
-		>,
		RefundableMessagesLane<
			WithBridgeHubWestendMessagesInstance,
			AssetHubRococoToAssetHubWestendMessagesLane,
@@ -246,6 +246,7 @@ mod tests {
	use crate::bridge_common_config::BridgeGrandpaWestendInstance;
	use bridge_runtime_common::{
		assert_complete_bridge_types,
+		extensions::refund_relayer_extension::RefundableParachain,
		integrity::{
			assert_complete_bridge_constants, check_message_lane_weights,
			AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants,
@@ -266,6 +267,11 @@ mod tests {
	/// operational costs and a faster bridge), so this value should be significant.
const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * rococo::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_PARACHAIN_HEADER: Balance = 2 * rococo::currency::UNITS; + #[test] fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { check_message_lane_weights::< @@ -318,7 +324,19 @@ mod tests { }, }); - bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< + Runtime, + BridgeGrandpaWestendInstance, + PriorityBoostPerRelayHeader, + >(FEE_BOOST_PER_RELAY_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< + Runtime, + RefundableParachain, + PriorityBoostPerParachainHeader, + >(FEE_BOOST_PER_PARACHAIN_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubWestendMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 109b081f937d..7c2aa4908861 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -35,6 +35,12 @@ pub mod bridge_to_westend_config; mod weights; pub mod xcm_config; +use bridge_runtime_common::extensions::{ + check_obsolete_extension::{ + CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, + }, + refund_relayer_extension::RefundableParachain, +}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use snowbridge_beacon_primitives::{Fork, ForkVersions}; use snowbridge_core::{ @@ -63,7 +69,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -740,10 +746,28 @@ pub type XcmOverRococoBulletin = XcmOverPolkadotBulletin; bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeWestendGrandpa, - BridgeRococoBulletinGrandpa, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_common_config::BridgeGrandpaWestendInstance, + bridge_to_westend_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_common_config::BridgeGrandpaRococoBulletinInstance, + bridge_to_bulletin_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, // Parachains - BridgeWestendParachains, + CheckAndBoostBridgeParachainsTransactions< + Runtime, + RefundableParachain< + bridge_common_config::BridgeParachainWestendInstance, + bp_bridge_hub_westend::BridgeHubWestend, + >, + bridge_to_westend_config::PriorityBoostPerParachainHeader, + xcm_config::TreasuryAccount, + >, // Messages BridgeWestendMessages, BridgeRococoBulletinMessages @@ -938,6 +962,11 @@ impl_runtime_apis! 
{ fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() } + fn free_headers_interval() -> Option { + >::FreeHeadersInterval::get() + } fn synced_headers_grandpa_info( ) -> Vec> { BridgeWestendGrandpa::synced_headers_grandpa_info() @@ -950,6 +979,10 @@ impl_runtime_apis! { bp_bridge_hub_westend::BridgeHubWestend >().unwrap_or(None) } + fn free_headers_interval() -> Option { + // "free interval" is not currently used for parachains + None + } } // This is exposed by BridgeHubRococo @@ -984,6 +1017,12 @@ impl_runtime_apis! { BridgePolkadotBulletinGrandpa::best_finalized() } + fn free_headers_interval() -> Option { + >::FreeHeadersInterval::get() + } + fn synced_headers_grandpa_info( ) -> Vec> { BridgePolkadotBulletinGrandpa::synced_headers_grandpa_info() diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index aac39a4564fb..942f243141da 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,8 +17,10 @@ //! Expose the auto generated weight files. +use ::pallet_bridge_grandpa::WeightInfoExt as GrandpaWeightInfoExt; use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; +use ::pallet_bridge_relayers::WeightInfo as _; pub mod block_weights; pub mod cumulus_pallet_parachain_system; @@ -56,6 +58,16 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; +impl GrandpaWeightInfoExt for pallet_bridge_grandpa::WeightInfo { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } +} + impl MessagesWeightInfoExt for pallet_bridge_messages_rococo_to_rococo_bulletin::WeightInfo { @@ -94,4 +106,12 @@ impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } + + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 776c505fa640..b309232825db 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -80,11 +80,10 @@ fn construct_and_apply_extrinsic( r.unwrap() } -fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> Balance { - let batch_call = RuntimeCall::Utility(batch); - let batch_info = batch_call.get_dispatch_info(); - let xt = construct_extrinsic(Alice, batch_call); - TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +fn construct_and_estimate_extrinsic_fee(call: RuntimeCall) -> Balance { + let info = call.get_dispatch_info(); + let xt = 
construct_extrinsic(Alice, call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &info, 0) } fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { @@ -376,20 +375,20 @@ mod bridge_hub_westend_tests { } #[test] - pub fn complex_relay_extrinsic_works() { - // for Westend - from_parachain::complex_relay_extrinsic_works::( + fn free_relay_extrinsic_works() { + // from Westend + from_parachain::free_relay_extrinsic_works::( collator_session_keys(), slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, BridgeHubWestendChainId::get(), + SIBLING_PARACHAIN_ID, Rococo, XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WESTEND, || (), construct_and_apply_extrinsic, - ); + ) } #[test] @@ -414,12 +413,12 @@ mod bridge_hub_westend_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), || { - from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -433,12 +432,12 @@ mod bridge_hub_westend_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), || { - from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -581,28 +580,28 @@ mod bridge_hub_bulletin_tests { } #[test] - pub fn complex_relay_extrinsic_works() { - // for Bulletin - from_grandpa_chain::complex_relay_extrinsic_works::( + fn free_relay_extrinsic_works() { + // from Bulletin + from_grandpa_chain::free_relay_extrinsic_works::( collator_session_keys(), slot_durations(), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, RococoBulletinChainId::get(), + SIBLING_PARACHAIN_ID, Rococo, XCM_LANE_FOR_ROCOCO_PEOPLE_TO_ROCOCO_BULLETIN, || (), construct_and_apply_extrinsic, - ); + ) } #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { + pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseDeliveryFeeInRocs::get(), || { - from_grandpa_chain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_grandpa_chain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -617,12 +616,12 @@ mod bridge_hub_bulletin_tests { } #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { + pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( 
"bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs", bp_bridge_hub_rococo::BridgeHubRococoBaseConfirmationFeeInRocs::get(), || { - from_grandpa_chain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_grandpa_chain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 86560caca99c..a7241cc6d10c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -18,6 +18,7 @@ hex-literal = { version = "0.4.1" } log = { workspace = true } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } serde = { optional = true, features = ["derive"], workspace = true, default-features = true } +tuplex = { version = "0.1", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } @@ -180,6 +181,7 @@ std = [ "sp-version/std", "substrate-wasm-builder", "testnet-parachains-constants/std", + "tuplex/std", "westend-runtime-constants/std", "xcm-builder/std", "xcm-executor/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs index d5da41cce286..425b53da30fc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/bridge_to_rococo_config.rs @@ -26,8 +26,8 @@ use bp_parachains::SingleParaStoredHeaderDataBuilder; use bp_runtime::Chain; use bridge_runtime_common::{ extensions::refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, + ActualFeeRefund, RefundBridgedMessages, RefundSignedExtensionAdapter, + RefundableMessagesLane, }, messages, messages::{ @@ -70,6 +70,10 @@ parameter_types! { 2, [GlobalConsensus(RococoGlobalConsensusNetwork::get())] ); + // see the `FEE_BOOST_PER_RELAY_HEADER` constant get the meaning of this value + pub PriorityBoostPerRelayHeader: u64 = 32_007_814_407_814; + // see the `FEE_BOOST_PER_PARACHAIN_HEADER` constant get the meaning of this value + pub PriorityBoostPerParachainHeader: u64 = 1_396_340_903_540_903; // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; @@ -191,9 +195,8 @@ impl ThisChainWithMessages for BridgeHubWestend { /// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
pub type OnBridgeHubWestendRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< + RefundBridgedMessages< Runtime, - RefundableParachain, RefundableMessagesLane< WithBridgeHubRococoMessagesInstance, AssetHubWestendToAssetHubRococoMessagesLane, @@ -210,7 +213,8 @@ pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance1; impl pallet_bridge_grandpa::Config for Runtime { type RuntimeEvent = RuntimeEvent; type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; + type MaxFreeHeadersPerBlock = ConstU32<4>; + type FreeHeadersInterval = ConstU32<5>; type HeadersToKeep = RelayChainHeadersToKeep; type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } @@ -281,6 +285,7 @@ mod tests { use super::*; use bridge_runtime_common::{ assert_complete_bridge_types, + extensions::refund_relayer_extension::RefundableParachain, integrity::{ assert_complete_bridge_constants, check_message_lane_weights, AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, @@ -301,6 +306,11 @@ mod tests { /// operational costs and a faster bridge), so this value should be significant. const FEE_BOOST_PER_MESSAGE: Balance = 2 * westend::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_RELAY_HEADER: Balance = 2 * westend::currency::UNITS; + // see `FEE_BOOST_PER_MESSAGE` comment + const FEE_BOOST_PER_PARACHAIN_HEADER: Balance = 2 * westend::currency::UNITS; + #[test] fn ensure_bridge_hub_westend_message_lane_weights_are_correct() { check_message_lane_weights::< @@ -352,7 +362,19 @@ mod tests { }, }); - bridge_runtime_common::extensions::priority_calculator::ensure_priority_boost_is_sane::< + bridge_runtime_common::extensions::priority_calculator::per_relay_header::ensure_priority_boost_is_sane::< + Runtime, + BridgeGrandpaRococoInstance, + PriorityBoostPerRelayHeader, + >(FEE_BOOST_PER_RELAY_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_parachain_header::ensure_priority_boost_is_sane::< + Runtime, + RefundableParachain, + PriorityBoostPerParachainHeader, + >(FEE_BOOST_PER_PARACHAIN_HEADER); + + bridge_runtime_common::extensions::priority_calculator::per_message::ensure_priority_boost_is_sane::< Runtime, WithBridgeHubRococoMessagesInstance, PriorityBoostPerMessage, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index cf09a1acc548..640eaf881a57 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -32,6 +32,12 @@ pub mod bridge_to_rococo_config; mod weights; pub mod xcm_config; +use bridge_runtime_common::extensions::{ + check_obsolete_extension::{ + CheckAndBoostBridgeGrandpaTransactions, CheckAndBoostBridgeParachainsTransactions, + }, + refund_relayer_extension::RefundableParachain, +}; use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use cumulus_primitives_core::ParaId; use sp_api::impl_runtime_apis; @@ -57,7 +63,7 @@ use frame_support::{ dispatch::DispatchClass, genesis_builder_helper::{build_state, get_preset}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, Get, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -502,9 +508,22 @@ construct_runtime!( 
bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, + CheckAndBoostBridgeGrandpaTransactions< + Runtime, + bridge_to_rococo_config::BridgeGrandpaRococoInstance, + bridge_to_rococo_config::PriorityBoostPerRelayHeader, + xcm_config::TreasuryAccount, + >, // Parachains - BridgeRococoParachains, + CheckAndBoostBridgeParachainsTransactions< + Runtime, + RefundableParachain< + bridge_to_rococo_config::BridgeParachainRococoInstance, + bp_bridge_hub_rococo::BridgeHubRococo, + >, + bridge_to_rococo_config::PriorityBoostPerParachainHeader, + xcm_config::TreasuryAccount, + >, // Messages BridgeRococoMessages } @@ -692,6 +711,11 @@ impl_runtime_apis! { fn best_finalized() -> Option> { BridgeRococoGrandpa::best_finalized() } + fn free_headers_interval() -> Option { + >::FreeHeadersInterval::get() + } fn synced_headers_grandpa_info( ) -> Vec> { BridgeRococoGrandpa::synced_headers_grandpa_info() @@ -704,6 +728,10 @@ impl_runtime_apis! { bp_bridge_hub_rococo::BridgeHubRococo >().unwrap_or(None) } + fn free_headers_interval() -> Option { + // "free interval" is not currently used for parachains + None + } } impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index a65ee31d3e55..245daaf8ed91 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -17,8 +17,10 @@ //! Expose the auto generated weight files. +use ::pallet_bridge_grandpa::WeightInfoExt as GrandpaWeightInfoExt; use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; +use ::pallet_bridge_relayers::WeightInfo as _; pub mod block_weights; pub mod cumulus_pallet_parachain_system; @@ -51,6 +53,16 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; +impl GrandpaWeightInfoExt for pallet_bridge_grandpa::WeightInfo { + fn submit_finality_proof_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } +} + impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE @@ -70,4 +82,12 @@ impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo u32 { bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE } + + fn submit_parachain_heads_overhead_from_runtime() -> Weight { + // our signed extension: + // 1) checks whether relayer registration is active from validate/pre_dispatch; + // 2) may slash and deregister relayer from post_dispatch + // (2) includes (1), so (2) is the worst case + pallet_bridge_relayers::WeightInfo::::slash_and_deregister() + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 988b10e1e2d8..836594140b23 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ 
b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -94,11 +94,10 @@ fn construct_and_apply_extrinsic( r.unwrap() } -fn construct_and_estimate_extrinsic_fee(batch: pallet_utility::Call) -> Balance { - let batch_call = RuntimeCall::Utility(batch); - let batch_info = batch_call.get_dispatch_info(); - let xt = construct_extrinsic(Alice, batch_call); - TransactionPayment::compute_fee(xt.encoded_size() as _, &batch_info, 0) +fn construct_and_estimate_extrinsic_fee(call: RuntimeCall) -> Balance { + let info = call.get_dispatch_info(); + let xt = construct_extrinsic(Alice, call); + TransactionPayment::compute_fee(xt.encoded_size() as _, &info, 0) } fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys { @@ -271,22 +270,6 @@ fn relayed_incoming_message_works() { ) } -#[test] -pub fn complex_relay_extrinsic_works() { - from_parachain::complex_relay_extrinsic_works::( - collator_session_keys(), - slot_durations(), - bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubRococoChainId::get(), - Westend, - XCM_LANE_FOR_ASSET_HUB_WESTEND_TO_ASSET_HUB_ROCOCO, - || (), - construct_and_apply_extrinsic, - ); -} - #[test] pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { bridge_hub_test_utils::check_sane_fees_values( @@ -309,12 +292,12 @@ pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { } #[test] -pub fn can_calculate_fee_for_complex_message_delivery_transaction() { +pub fn can_calculate_fee_for_standalone_message_delivery_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds", bp_bridge_hub_westend::BridgeHubWestendBaseDeliveryFeeInWnds::get(), || { - from_parachain::can_calculate_fee_for_complex_message_delivery_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_delivery_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, @@ -328,12 +311,12 @@ pub fn can_calculate_fee_for_complex_message_delivery_transaction() { } #[test] -pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction() { bridge_hub_test_utils::check_sane_fees_values( "bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds", bp_bridge_hub_westend::BridgeHubWestendBaseConfirmationFeeInWnds::get(), || { - from_parachain::can_calculate_fee_for_complex_message_confirmation_transaction::< + from_parachain::can_calculate_fee_for_standalone_message_confirmation_transaction::< RuntimeTestsAdapter, >(collator_session_keys(), construct_and_estimate_extrinsic_fee) }, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs index 8aaaa4f59d78..bfa2f0f50f94 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_grandpa_chain.rs @@ -41,6 +41,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; +use sp_core::Get; use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -162,7 
+163,14 @@ pub fn relayed_incoming_message_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< RuntimeHelper::MB, (), - >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number, + false, + ); let relay_chain_header_hash = relay_chain_header.hash(); vec![ @@ -202,6 +210,142 @@ pub fn relayed_incoming_message_works( ); } +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, message) independently submitted. +/// Finality proof is submitted for free in this test. +/// Also verifies relayer transaction signed extensions work as intended. +pub fn free_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + slot_durations: SlotDurations, + runtime_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + RuntimeCallOf, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeHelper::Runtime: pallet_balances::Config, + AccountIdOf: From, + RuntimeCallOf: From> + + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + // ensure that the runtime allows free header submissions + let free_headers_interval = >::FreeHeadersInterval::get() + .expect("this test requires runtime, configured to accept headers for free; qed"); + + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + slot_durations, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + prepare_configuration(); + + // start with bridged relay chain block#0 + let initial_block_number = 0; + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::( + initial_block_number, + ), + ); + + // free relay chain header is `0 + free_headers_interval` + let relay_header_number = initial_block_number + free_headers_interval; + + // relayer balance shall not change after relay and para header submissions + let initial_relayer_balance = + pallet_balances::Pallet::::free_balance( + relayer_id_at_this_chain.clone(), + ); + + // initialize the `FreeHeadersRemaining` storage value + pallet_bridge_grandpa::Pallet::::on_initialize( + 0u32.into(), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
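+ // the trailing `true` in the call below is the new `is_minimal_call` flag: unlike the
+ // paid-transaction tests, the bridged header is not padded up to the maximal (refundable)
+ // `submit_finality_proof` call size (see `make_complex_bridged_grandpa_header_proof`), so
+ // the finality submission stays small enough to be accepted for free.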
+ let (relay_chain_header, grandpa_justification, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number.into(), + true, + ); + + let relay_chain_header_hash = relay_chain_header.hash(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ), + )) + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, /// with proofs (finality, message) batched together in signed extrinsic. /// Also verifies relayer transaction signed extensions work as intended. @@ -265,7 +409,14 @@ pub fn complex_relay_extrinsic_works( test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< RuntimeHelper::MB, (), - >(lane_id, xcm.into(), message_nonce, message_destination, relay_header_number); + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + relay_header_number, + false, + ); let relay_chain_header_hash = relay_chain_header.hash(); vec![( @@ -344,6 +495,7 @@ where 1, [GlobalConsensus(Polkadot), Parachain(1_000)].into(), 1u32.into(), + false, ); // generate batch call that provides finality for bridged relay and parachains + message @@ -423,3 +575,109 @@ where compute_extrinsic_fee(batch) }) } + +/// Estimates transaction fee for default message delivery transaction from bridged GRANDPA chain. +pub fn can_calculate_fee_for_standalone_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + RuntimeCallOf: + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. 
But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let (_, _, message_proof) = + test_data::from_grandpa_chain::make_complex_relayer_delivery_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + [GlobalConsensus(Polkadot), Parachain(1_000)].into(), + 1u32.into(), + false, + ); + + let call = test_data::from_grandpa_chain::make_standalone_relayer_delivery_call::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >( + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + + compute_extrinsic_fee(call) + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteGrandpaChainHelper, + AccountIdOf: From, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: + From>, + UnderlyingChainOf>: ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let (_, _, message_delivery_proof) = + test_data::from_grandpa_chain::make_complex_relayer_confirmation_proofs::< + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1u32.into(), + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + let call = test_data::from_grandpa_chain::make_standalone_relayer_confirmation_call::< + RuntimeHelper::Runtime, + RuntimeHelper::GPI, + RuntimeHelper::MPI, + >(message_delivery_proof, unrewarded_relayers); + + compute_extrinsic_fee(call) + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs index 72ec0718acf7..12ab382d9e0f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/from_parachain.rs @@ -42,6 +42,7 @@ use frame_system::pallet_prelude::BlockNumberFor; use parachains_runtimes_test_utils::{ AccountIdOf, BasicParachainRuntime, CollatorSessionKeys, RuntimeCallOf, SlotDurations, }; +use sp_core::Get; use sp_keyring::AccountKeyring::*; use sp_runtime::{traits::Header as HeaderT, AccountId32}; use xcm::latest::prelude::*; @@ -188,6 +189,7 @@ pub fn relayed_incoming_message_works( para_header_number, relay_header_number, bridged_para_id, + false, ); let parachain_head_hash = parachain_head.hash(); @@ -241,6 +243,177 @@ pub fn relayed_incoming_message_works( ); } +/// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, +/// with proofs (finality, para heads, message) independently submitted. +/// Finality and para heads are submitted for free in this test. +/// Also verifies relayer transaction signed extensions work as intended. 
+pub fn free_relay_extrinsic_works( + collator_session_key: CollatorSessionKeys, + slot_durations: SlotDurations, + runtime_para_id: u32, + bridged_para_id: u32, + bridged_chain_id: bp_runtime::ChainId, + sibling_parachain_id: u32, + local_relay_chain_id: NetworkId, + lane_id: LaneId, + prepare_configuration: impl Fn(), + construct_and_apply_extrinsic: fn( + sp_keyring::AccountKeyring, + ::RuntimeCall, + ) -> sp_runtime::DispatchOutcome, +) where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeHelper::Runtime: pallet_balances::Config, + AccountIdOf: From, + RuntimeCallOf: From> + + From> + + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + // ensure that the runtime allows free header submissions + let free_headers_interval = >::FreeHeadersInterval::get() + .expect("this test requires runtime, configured to accept headers for free; qed"); + + helpers::relayed_incoming_message_works::< + RuntimeHelper::Runtime, + RuntimeHelper::AllPalletsWithoutSystem, + RuntimeHelper::MPI, + >( + collator_session_key, + slot_durations, + runtime_para_id, + sibling_parachain_id, + local_relay_chain_id, + construct_and_apply_extrinsic, + |relayer_id_at_this_chain, + relayer_id_at_bridged_chain, + message_destination, + message_nonce, + xcm| { + prepare_configuration(); + + // start with bridged relay chain block#0 + let initial_block_number = 0; + helpers::initialize_bridge_grandpa_pallet::( + test_data::initialization_data::( + initial_block_number, + ), + ); + + // free relay chain header is `0 + free_headers_interval` + let relay_header_number = initial_block_number + free_headers_interval; + // first parachain header is always submitted for free + let para_header_number = 1; + + // relayer balance shall not change after relay and para header submissions + let initial_relayer_balance = + pallet_balances::Pallet::::free_balance( + relayer_id_at_this_chain.clone(), + ); + + // initialize the `FreeHeadersRemaining` storage value + pallet_bridge_grandpa::Pallet::::on_initialize( + 0u32.into(), + ); + + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. 
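+ // as in the GRANDPA-chain variant of this test, the trailing `true` below is the
+ // `is_minimal_call` flag - it skips the digest padding of the bridged relay header, so the
+ // relay chain header (and the single free parachain head) can be submitted without a fee,
+ // which is exactly what the `VerifyRelayerBalance` check asserts afterwards.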
+ let ( + relay_chain_header, + grandpa_justification, + parachain_head, + parachain_heads, + para_heads_proof, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + lane_id, + xcm.into(), + message_nonce, + message_destination, + para_header_number, + relay_header_number, + bridged_para_id, + true, + ); + + let parachain_head_hash = parachain_head.hash(); + let relay_chain_header_hash = relay_chain_header.hash(); + let relay_chain_header_number = *relay_chain_header.number(); + vec![ + ( + BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(relay_chain_header), + justification: grandpa_justification, + }.into(), + Box::new(( + helpers::VerifySubmitGrandpaFinalityProofOutcome::::expect_best_header_hash( + relay_chain_header_hash, + ), + helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ), + )), + ), + ( + BridgeParachainsCall::::submit_parachain_heads { + at_relay_block: (relay_chain_header_number, relay_chain_header_hash), + parachains: parachain_heads, + parachain_heads_proof: para_heads_proof, + }.into(), + Box::new(( + helpers::VerifySubmitParachainHeaderProofOutcome::::expect_best_header_hash( + bridged_para_id, + parachain_head_hash, + ), + /*helpers::VerifyRelayerBalance::::expect_relayer_balance( + relayer_id_at_this_chain.clone(), + initial_relayer_balance, + ),*/ + )), + ), + ( + BridgeMessagesCall::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + }.into(), + Box::new(( + helpers::VerifySubmitMessagesProofOutcome::::expect_last_delivered_nonce( + lane_id, + 1, + ), + helpers::VerifyRelayerRewarded::::expect_relayer_reward( + relayer_id_at_this_chain, + RewardsAccountParams::new( + lane_id, + bridged_chain_id, + RewardsAccountOwner::ThisChain, + ), + ), + )), + ), + ] + }, + ); +} + /// Test-case makes sure that Runtime can dispatch XCM messages submitted by relayer, /// with proofs (finality, para heads, message) batched together in signed extrinsic. /// Also verifies relayer transaction signed extensions work as intended. @@ -325,6 +498,7 @@ pub fn complex_relay_extrinsic_works( para_header_number, relay_header_number, bridged_para_id, + false, ); let parachain_head_hash = parachain_head.hash(); @@ -428,6 +602,7 @@ where 1, 5, 1_000, + false, ); // generate batch call that provides finality for bridged relay and parachains + message @@ -527,3 +702,126 @@ where compute_extrinsic_fee(batch) }) } + +/// Estimates transaction fee for default message delivery transaction from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_delivery_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + RuntimeCallOf: + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::SourceHeaderChain: + SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof< + HashOf>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + // + // we don't care about parameter values here, apart from the XCM message size. 
But we + // do not need to have a large message here, because we're charging for every byte of + // the message additionally + let ( + _, + _, + _, + _, + _, + message_proof, + ) = test_data::from_parachain::make_complex_relayer_delivery_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + vec![Instruction::<()>::ClearOrigin; 1_024].into(), + 1, + [GlobalConsensus(Polkadot), Parachain(1_000)].into(), + 1, + 5, + 1_000, + false, + ); + + let call = test_data::from_parachain::make_standalone_relayer_delivery_call::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + _, + >( + message_proof, + helpers::relayer_id_at_bridged_chain::(), + ); + + compute_extrinsic_fee(call) + }) +} + +/// Estimates transaction fee for default message confirmation transaction (batched with required +/// proofs) from bridged parachain. +pub fn can_calculate_fee_for_standalone_message_confirmation_transaction( + collator_session_key: CollatorSessionKeys, + compute_extrinsic_fee: fn( + ::RuntimeCall, + ) -> u128, +) -> u128 +where + RuntimeHelper: WithRemoteParachainHelper, + AccountIdOf: From, + MessageThisChain: + bp_runtime::Chain>, + RuntimeCallOf: + From>, + UnderlyingChainOf>: + bp_runtime::Chain + Parachain, + >::BridgedChain: + bp_runtime::Chain + ChainWithGrandpa, + >::TargetHeaderChain: + TargetHeaderChain< + XcmAsPlainPayload, + AccountIdOf, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>>, + >, + >, +{ + run_test::(collator_session_key, 1000, vec![], || { + // generate bridged relay chain finality, parachain heads and message proofs, + // to be submitted by relayer to this chain. + let unrewarded_relayers = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }; + let (_, _, _, _, _, message_delivery_proof) = + test_data::from_parachain::make_complex_relayer_confirmation_proofs::< + >::BridgedChain, + RuntimeHelper::MB, + (), + >( + LaneId::default(), + 1, + 5, + 1_000, + AccountId32::from(Alice.public()).into(), + unrewarded_relayers.clone(), + ); + + let call = test_data::from_parachain::make_standalone_relayer_confirmation_call::< + RuntimeHelper::Runtime, + RuntimeHelper::MPI, + >(message_delivery_proof, unrewarded_relayers); + + compute_extrinsic_fee(call) + }) +} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs index 2b48f2e3d515..0ce049cd1c46 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases/helpers.rs @@ -193,6 +193,34 @@ where } } +/// Verifies that relayer balance is equal to given value. +pub struct VerifyRelayerBalance { + relayer: Runtime::AccountId, + balance: Runtime::Balance, +} + +impl VerifyRelayerBalance +where + Runtime: pallet_balances::Config, +{ + /// Expect given relayer balance after transaction. + pub fn expect_relayer_balance( + relayer: Runtime::AccountId, + balance: Runtime::Balance, + ) -> Box { + Box::new(Self { relayer, balance }) + } +} + +impl VerifyTransactionOutcome for VerifyRelayerBalance +where + Runtime: pallet_balances::Config, +{ + fn verify_outcome(&self) { + assert_eq!(pallet_balances::Pallet::::free_balance(&self.relayer), self.balance,); + } +} + /// Initialize bridge GRANDPA pallet. 
pub(crate) fn initialize_bridge_grandpa_pallet( init_data: bp_header_chain::InitializationData>, diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs index 017ec0fd5405..e5d5e7cac96b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_grandpa_chain.rs @@ -121,6 +121,60 @@ where } } +/// Prepare a call with message proof. +pub fn make_standalone_relayer_delivery_call( + message_proof: FromBridgedChainMessagesProof>>, + relayer_id_at_bridged_chain: AccountIdOf>, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = AccountIdOf>, + >, + MPI: 'static, + >::SourceHeaderChain: SourceHeaderChain< + MessagesProof = FromBridgedChainMessagesProof>>, + >, + Runtime::RuntimeCall: From>, +{ + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain, + proof: message_proof, + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + } + .into() +} + +/// Prepare a call with message delivery proof. +pub fn make_standalone_relayer_confirmation_call( + message_delivery_proof: FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + relayers_state: UnrewardedRelayersState, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_grandpa::Config + + pallet_bridge_messages::Config, + MPI: 'static, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof< + HashOf>, + >, + >, + Runtime::RuntimeCall: From>, +{ + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + } + .into() +} + /// Prepare storage proofs of messages, stored at the (bridged) source GRANDPA chain. 
pub fn make_complex_relayer_delivery_proofs( lane_id: LaneId, @@ -128,6 +182,7 @@ pub fn make_complex_relayer_delivery_proofs( message_nonce: MessageNonce, message_destination: Junctions, header_number: BlockNumberOf>, + is_minimal_call: bool, ) -> ( HeaderOf>, GrandpaJustification>>, @@ -153,7 +208,7 @@ where let (header, justification) = make_complex_bridged_grandpa_header_proof::< MessageBridgedChain, - >(state_root, header_number); + >(state_root, header_number, is_minimal_call); let message_proof = FromBridgedChainMessagesProof { bridged_header_hash: header.hash(), @@ -200,8 +255,11 @@ where StorageProofSize::Minimal(0), ); - let (header, justification) = - make_complex_bridged_grandpa_header_proof::(state_root, header_number); + let (header, justification) = make_complex_bridged_grandpa_header_proof::( + state_root, + header_number, + false, + ); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { bridged_header_hash: header.hash(), @@ -216,6 +274,7 @@ where pub fn make_complex_bridged_grandpa_header_proof( state_root: HashOf, header_number: BlockNumberOf, + is_minimal_call: bool, ) -> (HeaderOf, GrandpaJustification>) where BridgedChain: ChainWithGrandpa, @@ -229,7 +288,9 @@ where // `submit_finality_proof` call size would be close to maximal expected (and refundable) let extra_bytes_required = maximal_expected_submit_finality_proof_call_size::() .saturating_sub(header.encoded_size()); - header.digest_mut().push(DigestItem::Other(vec![42; extra_bytes_required])); + if !is_minimal_call { + header.digest_mut().push(DigestItem::Other(vec![42; extra_bytes_required])); + } let justification = make_default_justification(&header); (header, justification) diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs index 932ba2312399..5d3cba4e53b5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_data/from_parachain.rs @@ -159,6 +159,52 @@ where } } +/// Prepare a call with message proof. +pub fn make_standalone_relayer_delivery_call( + message_proof: FromBridgedChainMessagesProof, + relayer_id_at_bridged_chain: InboundRelayer, +) -> Runtime::RuntimeCall where + Runtime: pallet_bridge_messages::Config< + MPI, + InboundPayload = XcmAsPlainPayload, + InboundRelayer = InboundRelayer, + >, + MPI: 'static, + Runtime::RuntimeCall: From>, + <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof: + From>, +{ + pallet_bridge_messages::Call::::receive_messages_proof { + relayer_id_at_bridged_chain: relayer_id_at_bridged_chain.into(), + proof: message_proof.into(), + messages_count: 1, + dispatch_weight: Weight::from_parts(1000000000, 0), + } + .into() +} + +/// Prepare a call with message delivery proof. +pub fn make_standalone_relayer_confirmation_call( + message_delivery_proof: FromBridgedChainMessagesDeliveryProof, + relayers_state: UnrewardedRelayersState, +) -> Runtime::RuntimeCall +where + Runtime: pallet_bridge_messages::Config, + MPI: 'static, + Runtime::RuntimeCall: From>, + >::TargetHeaderChain: TargetHeaderChain< + XcmAsPlainPayload, + Runtime::AccountId, + MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, + >, +{ + pallet_bridge_messages::Call::::receive_messages_delivery_proof { + proof: message_delivery_proof, + relayers_state, + } + .into() +} + /// Prepare storage proofs of messages, stored at the source chain. 
pub fn make_complex_relayer_delivery_proofs( lane_id: LaneId, @@ -168,6 +214,7 @@ pub fn make_complex_relayer_delivery_proofs ( HeaderOf, GrandpaJustification>, @@ -201,6 +248,7 @@ where para_header_number, relay_header_number, bridged_para_id, + is_minimal_call, ); let message_proof = FromBridgedChainMessagesProof { @@ -266,6 +314,7 @@ where para_header_number, relay_header_number, bridged_para_id, + false, ); let message_delivery_proof = FromBridgedChainMessagesDeliveryProof { @@ -290,6 +339,7 @@ pub fn make_complex_bridged_parachain_heads_proof( para_header_number: u32, relay_header_number: BlockNumberOf, bridged_para_id: u32, + is_minimal_call: bool, ) -> ( HeaderOf, GrandpaJustification>, @@ -319,9 +369,12 @@ where )]); assert_eq!(bridged_para_head.hash(), parachain_heads[0].1); - let (relay_chain_header, justification) = make_complex_bridged_grandpa_header_proof::< - BridgedRelayChain, - >(relay_state_root, relay_header_number); + let (relay_chain_header, justification) = + make_complex_bridged_grandpa_header_proof::( + relay_state_root, + relay_header_number, + is_minimal_call, + ); (relay_chain_header, justification, bridged_para_head, parachain_heads, para_heads_proof) } diff --git a/prdoc/pr_4102.prdoc b/prdoc/pr_4102.prdoc new file mode 100644 index 000000000000..50c1ec23b2ac --- /dev/null +++ b/prdoc/pr_4102.prdoc @@ -0,0 +1,43 @@ +title: "Bridge: make some headers submissions free" + +doc: + - audience: Runtime Dev + description: | + Adds `FreeHeadersInterval` configuration constant to the `pallet_bridge_grandpa`. + Transactions that improve best known header by at least `FreeHeadersInterval` headers + are now free for the submitter. Additionally, we allow single free parachain header + update per every free relay chain header. Bridge signed extensions are adjusted + to support that new scheme. Bridge runtime APIs are extended to support that new + scheme. Bridge fees are decreased by ~98% because now they do not include cost of + finality submissions - we assume relayers will be submitting finality transactions + for free. + +crates: + - name: bridge-runtime-common + bump: major + - name: bp-bridge-hub-cumulus + bump: patch + - name: bp-bridge-hub-kusama + bump: major + - name: bp-bridge-hub-polkadot + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: pallet-bridge-grandpa + bump: major + - name: pallet-bridge-parachains + bump: major + - name: bp-parachains + bump: major + - name: bp-runtime + bump: major + - name: relay-substrate-client + bump: major + - name: bridge-hub-rococo-runtime + bump: major + - name: bridge-hub-westend-runtime + bump: major + - name: bridge-hub-test-utils + bump: minor From 7e68b2b8da9caf634ff4f6c6d96d2d7914c44fb7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 25 Apr 2024 10:20:17 +0300 Subject: [PATCH 53/74] Bridge: added free headers submission support to the substrate-relay (#4157) Original PR: https://github.com/paritytech/parity-bridges-common/pull/2884. 
Since chain-specific code lives in the `parity-bridges-common` repo, some parts of original PR will require another PR --------- Co-authored-by: Adrian Catangiu --- bridges/chains/chain-kusama/src/lib.rs | 2 + bridges/chains/chain-polkadot/src/lib.rs | 2 + bridges/chains/chain-rococo/src/lib.rs | 2 + bridges/chains/chain-westend/src/lib.rs | 2 + bridges/relays/client-substrate/src/chain.rs | 9 + .../relays/client-substrate/src/test_chain.rs | 2 + bridges/relays/finality/README.md | 4 +- bridges/relays/finality/src/finality_loop.rs | 152 +++++++-- bridges/relays/finality/src/headers.rs | 143 ++++++++- bridges/relays/finality/src/lib.rs | 4 +- bridges/relays/finality/src/mock.rs | 5 + .../src/cli/relay_headers.rs | 18 +- .../src/cli/relay_headers_and_messages/mod.rs | 17 +- .../parachain_to_parachain.rs | 4 +- .../relay_to_parachain.rs | 4 +- .../relay_to_relay.rs | 4 +- .../src/cli/relay_parachains.rs | 14 +- .../lib-substrate-relay/src/finality/mod.rs | 16 +- .../src/finality/target.rs | 21 +- bridges/relays/lib-substrate-relay/src/lib.rs | 3 + .../src/on_demand/headers.rs | 16 +- .../src/on_demand/parachains.rs | 14 +- .../lib-substrate-relay/src/parachains/mod.rs | 2 + .../src/parachains/target.rs | 109 +++++-- .../relays/parachains/src/parachains_loop.rs | 303 ++++++++++++++++-- prdoc/pr_4157.prdoc | 29 ++ 26 files changed, 769 insertions(+), 132 deletions(-) create mode 100644 prdoc/pr_4157.prdoc diff --git a/bridges/chains/chain-kusama/src/lib.rs b/bridges/chains/chain-kusama/src/lib.rs index a81004afe812..fd7172c5869d 100644 --- a/bridges/chains/chain-kusama/src/lib.rs +++ b/bridges/chains/chain-kusama/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Kusama GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_KUSAMA_GRANDPA_PALLET_NAME: &str = "BridgeKusamaGrandpa"; +/// Name of the With-Kusama parachains pallet instance that is deployed at bridged chains. +pub const WITH_KUSAMA_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeKusamaParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot /// parachains. diff --git a/bridges/chains/chain-polkadot/src/lib.rs b/bridges/chains/chain-polkadot/src/lib.rs index 00d35783a9b6..a8cac0467d57 100644 --- a/bridges/chains/chain-polkadot/src/lib.rs +++ b/bridges/chains/chain-polkadot/src/lib.rs @@ -69,6 +69,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Polkadot GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_POLKADOT_GRANDPA_PALLET_NAME: &str = "BridgePolkadotGrandpa"; +/// Name of the With-Polkadot parachains pallet instance that is deployed at bridged chains. +pub const WITH_POLKADOT_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgePolkadotParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot /// parachains. diff --git a/bridges/chains/chain-rococo/src/lib.rs b/bridges/chains/chain-rococo/src/lib.rs index 2385dd2cbb25..b290fe71c829 100644 --- a/bridges/chains/chain-rococo/src/lib.rs +++ b/bridges/chains/chain-rococo/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Rococo GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_ROCOCO_GRANDPA_PALLET_NAME: &str = "BridgeRococoGrandpa"; +/// Name of the With-Rococo parachains pallet instance that is deployed at bridged chains. 
+pub const WITH_ROCOCO_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeRococoParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Rococo /// parachains. diff --git a/bridges/chains/chain-westend/src/lib.rs b/bridges/chains/chain-westend/src/lib.rs index b344b7f4bf93..ef451f7de0a9 100644 --- a/bridges/chains/chain-westend/src/lib.rs +++ b/bridges/chains/chain-westend/src/lib.rs @@ -67,6 +67,8 @@ pub const PARAS_PALLET_NAME: &str = "Paras"; /// Name of the With-Westend GRANDPA pallet instance that is deployed at bridged chains. pub const WITH_WESTEND_GRANDPA_PALLET_NAME: &str = "BridgeWestendGrandpa"; +/// Name of the With-Westend parachains pallet instance that is deployed at bridged chains. +pub const WITH_WESTEND_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeWestendParachains"; /// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Westend /// parachains. diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs index 2aba5f5674d9..40269fe64c87 100644 --- a/bridges/relays/client-substrate/src/chain.rs +++ b/bridges/relays/client-substrate/src/chain.rs @@ -46,6 +46,12 @@ pub trait Chain: ChainBase + Clone { /// Keep in mind that this method is normally provided by the other chain, which is /// bridged with this chain. const BEST_FINALIZED_HEADER_ID_METHOD: &'static str; + /// Name of the runtime API method that is returning interval between source chain + /// headers that may be submitted for free to the target chain. + /// + /// Keep in mind that this method is normally provided by the other chain, which is + /// bridged with this chain. + const FREE_HEADERS_INTERVAL_METHOD: &'static str; /// Average block interval. /// @@ -75,6 +81,9 @@ pub trait ChainWithRuntimeVersion: Chain { pub trait RelayChain: Chain { /// Name of the `runtime_parachains::paras` pallet in the runtime of this chain. const PARAS_PALLET_NAME: &'static str; + /// Name of the `pallet-bridge-parachains`, deployed at the **bridged** chain to sync + /// parachains of **this** chain. 
+ const WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME: &'static str; } /// Substrate-based chain that is using direct GRANDPA finality from minimal relay-client point of diff --git a/bridges/relays/client-substrate/src/test_chain.rs b/bridges/relays/client-substrate/src/test_chain.rs index d1203a2c58ea..cfd241c022a2 100644 --- a/bridges/relays/client-substrate/src/test_chain.rs +++ b/bridges/relays/client-substrate/src/test_chain.rs @@ -56,6 +56,7 @@ impl bp_runtime::Chain for TestChain { impl Chain for TestChain { const NAME: &'static str = "Test"; const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestMethod"; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestMethod"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); type SignedBlock = sp_runtime::generic::SignedBlock< @@ -124,6 +125,7 @@ impl bp_runtime::UnderlyingChainProvider for TestParachain { impl Chain for TestParachain { const NAME: &'static str = "TestParachain"; const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestParachainMethod"; + const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestParachainMethod"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); type SignedBlock = sp_runtime::generic::SignedBlock< diff --git a/bridges/relays/finality/README.md b/bridges/relays/finality/README.md index 92e765cea0e5..89b9d1399584 100644 --- a/bridges/relays/finality/README.md +++ b/bridges/relays/finality/README.md @@ -33,7 +33,9 @@ node. The transaction is then tracked by the relay until it is mined and finaliz The main entrypoint for the crate is the [`run` function](./src/finality_loop.rs), which takes source and target clients and [`FinalitySyncParams`](./src/finality_loop.rs) parameters. The most important parameter is the `only_mandatory_headers` - it is set to `true`, the relay will only submit mandatory headers. Since transactions -with mandatory headers are fee-free, the cost of running such relay is zero (in terms of fees). +with mandatory headers are fee-free, the cost of running such relay is zero (in terms of fees). If a similar, +`only_free_headers` parameter, is set to `true`, then free headers (if configured in the runtime) are also +relayed. ## Finality Relay Metrics diff --git a/bridges/relays/finality/src/finality_loop.rs b/bridges/relays/finality/src/finality_loop.rs index e31d8a708122..8b3def868a45 100644 --- a/bridges/relays/finality/src/finality_loop.rs +++ b/bridges/relays/finality/src/finality_loop.rs @@ -29,7 +29,7 @@ use crate::{ use async_trait::async_trait; use backoff::{backoff::Backoff, ExponentialBackoff}; use futures::{future::Fuse, select, Future, FutureExt}; -use num_traits::Saturating; +use num_traits::{Saturating, Zero}; use relay_utils::{ metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, HeaderId, MaybeConnectionError, TrackedTransactionStatus, TransactionTracker, @@ -39,6 +39,17 @@ use std::{ time::{Duration, Instant}, }; +/// Type of headers that we relay. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum HeadersToRelay { + /// Relay all headers. + All, + /// Relay only mandatory headers. + Mandatory, + /// Relay only free (including mandatory) headers. + Free, +} + /// Finality proof synchronization loop parameters. #[derive(Debug, Clone)] pub struct FinalitySyncParams { @@ -63,7 +74,7 @@ pub struct FinalitySyncParams { /// Timeout before we treat our transactions as lost and restart the whole sync process. pub stall_timeout: Duration, /// If true, only mandatory headers are relayed. 
- pub only_mandatory_headers: bool, + pub headers_to_relay: HeadersToRelay, } /// Source client used in finality synchronization loop. @@ -90,11 +101,16 @@ pub trait TargetClient: RelayClient { &self, ) -> Result, Self::Error>; + /// Get free source headers submission interval, if it is configured in the + /// target runtime. + async fn free_source_headers_interval(&self) -> Result, Self::Error>; + /// Submit header finality proof. async fn submit_finality_proof( &self, header: P::Header, proof: P::FinalityProof, + is_free_execution_expected: bool, ) -> Result; } @@ -104,9 +120,13 @@ pub fn metrics_prefix() -> String { format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) } +/// Finality sync information. pub struct SyncInfo { + /// Best finalized header at the source client. pub best_number_at_source: P::Number, + /// Best source header, known to the target client. pub best_number_at_target: P::Number, + /// Whether the target client follows the same fork as the source client do. pub is_using_same_fork: bool, } @@ -183,6 +203,7 @@ impl Transaction Result { let header_number = header.number(); log::debug!( @@ -193,7 +214,9 @@ impl Transaction, TC: TargetClient

> Finality pub async fn select_header_to_submit( &mut self, info: &SyncInfo

, + free_headers_interval: Option, ) -> Result>, Error> { // to see that the loop is progressing log::trace!( @@ -302,9 +326,15 @@ impl, TC: TargetClient

> Finality ); // read missing headers - let selector = JustifiedHeaderSelector::new::(&self.source_client, info).await?; + let selector = JustifiedHeaderSelector::new::( + &self.source_client, + info, + self.sync_params.headers_to_relay, + free_headers_interval, + ) + .await?; // if we see that the header schedules GRANDPA change, we need to submit it - if self.sync_params.only_mandatory_headers { + if self.sync_params.headers_to_relay == HeadersToRelay::Mandatory { return Ok(selector.select_mandatory()) } @@ -312,7 +342,12 @@ impl, TC: TargetClient

> Finality // => even if we have already selected some header and its persistent finality proof, // we may try to select better header by reading non-persistent proofs from the stream self.finality_proofs_buf.fill(&mut self.finality_proofs_stream); - let maybe_justified_header = selector.select(&self.finality_proofs_buf); + let maybe_justified_header = selector.select( + info, + self.sync_params.headers_to_relay, + free_headers_interval, + &self.finality_proofs_buf, + ); // remove obsolete 'recent' finality proofs + keep its size under certain limit let oldest_finality_proof_to_keep = maybe_justified_header @@ -329,6 +364,7 @@ impl, TC: TargetClient

> Finality pub async fn run_iteration( &mut self, + free_headers_interval: Option, ) -> Result< Option>, Error, @@ -345,12 +381,16 @@ impl, TC: TargetClient

> Finality } // submit new header if we have something new - match self.select_header_to_submit(&info).await? { + match self.select_header_to_submit(&info, free_headers_interval).await? { Some(header) => { - let transaction = - Transaction::submit(&self.target_client, header.header, header.proof) - .await - .map_err(Error::Target)?; + let transaction = Transaction::submit( + &self.target_client, + header.header, + header.proof, + self.sync_params.headers_to_relay == HeadersToRelay::Free, + ) + .await + .map_err(Error::Target)?; self.best_submitted_number = Some(transaction.header_number); Ok(Some(transaction)) }, @@ -378,9 +418,11 @@ impl, TC: TargetClient

> Finality let exit_signal = exit_signal.fuse(); futures::pin_mut!(exit_signal, proof_submission_tx_tracker); + let free_headers_interval = free_headers_interval(&self.target_client).await?; + loop { // run loop iteration - let next_tick = match self.run_iteration().await { + let next_tick = match self.run_iteration(free_headers_interval).await { Ok(Some(tx)) => { proof_submission_tx_tracker .set(tx.track::(self.target_client.clone()).fuse()); @@ -433,6 +475,52 @@ impl, TC: TargetClient

> Finality } } +async fn free_headers_interval( + target_client: &impl TargetClient

, +) -> Result, FailedClient> { + match target_client.free_source_headers_interval().await { + Ok(Some(free_headers_interval)) if !free_headers_interval.is_zero() => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is: {:?}", + P::SOURCE_NAME, + P::TARGET_NAME, + free_headers_interval, + ); + Ok(Some(free_headers_interval)) + }, + Ok(Some(_free_headers_interval)) => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is zero. Not submitting any free headers", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + Ok(None) + }, + Ok(None) => { + log::trace!( + target: "bridge", + "Free headers interval for {} headers at {} is None. Not submitting any free headers", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + + Ok(None) + }, + Err(e) => { + log::error!( + target: "bridge", + "Failed to read free headers interval for {} headers at {}: {:?}", + P::SOURCE_NAME, + P::TARGET_NAME, + e, + ); + Err(FailedClient::Target) + }, + } +} + /// Run finality proofs synchronization loop. pub async fn run( source_client: impl SourceClient

, @@ -509,7 +597,7 @@ mod tests { tick: Duration::from_secs(0), recent_finality_proofs_limit: 1024, stall_timeout: Duration::from_secs(1), - only_mandatory_headers: false, + headers_to_relay: HeadersToRelay::All, } } @@ -593,8 +681,8 @@ mod tests { ); } - fn run_only_mandatory_headers_mode_test( - only_mandatory_headers: bool, + fn run_headers_to_relay_mode_test( + headers_to_relay: HeadersToRelay, has_mandatory_headers: bool, ) -> Option> { let (exit_sender, _) = futures::channel::mpsc::unbounded(); @@ -619,7 +707,7 @@ mod tests { tick: Duration::from_secs(0), recent_finality_proofs_limit: 0, stall_timeout: Duration::from_secs(0), - only_mandatory_headers, + headers_to_relay, }, None, ); @@ -628,16 +716,22 @@ mod tests { best_number_at_target: 5, is_using_same_fork: true, }; - finality_loop.select_header_to_submit(&info).await.unwrap() + finality_loop.select_header_to_submit(&info, Some(3)).await.unwrap() }) } #[test] - fn select_header_to_submit_skips_non_mandatory_headers_when_only_mandatory_headers_are_required( - ) { - assert_eq!(run_only_mandatory_headers_mode_test(true, false), None); + fn select_header_to_submit_may_select_non_mandatory_header() { + assert_eq!(run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, false), None); assert_eq!( - run_only_mandatory_headers_mode_test(false, false), + run_headers_to_relay_mode_test(HeadersToRelay::Free, false), + Some(JustifiedHeader { + header: TestSourceHeader(false, 10, 10), + proof: TestFinalityProof(10) + }), + ); + assert_eq!( + run_headers_to_relay_mode_test(HeadersToRelay::All, false), Some(JustifiedHeader { header: TestSourceHeader(false, 10, 10), proof: TestFinalityProof(10) @@ -646,17 +740,23 @@ mod tests { } #[test] - fn select_header_to_submit_selects_mandatory_headers_when_only_mandatory_headers_are_required() - { + fn select_header_to_submit_may_select_mandatory_header() { + assert_eq!( + run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, true), + Some(JustifiedHeader { + header: TestSourceHeader(true, 8, 8), + proof: TestFinalityProof(8) + }), + ); assert_eq!( - run_only_mandatory_headers_mode_test(true, true), + run_headers_to_relay_mode_test(HeadersToRelay::Free, true), Some(JustifiedHeader { header: TestSourceHeader(true, 8, 8), proof: TestFinalityProof(8) }), ); assert_eq!( - run_only_mandatory_headers_mode_test(false, true), + run_headers_to_relay_mode_test(HeadersToRelay::All, true), Some(JustifiedHeader { header: TestSourceHeader(true, 8, 8), proof: TestFinalityProof(8) @@ -690,7 +790,7 @@ mod tests { test_sync_params(), Some(metrics_sync.clone()), ); - finality_loop.run_iteration().await.unwrap() + finality_loop.run_iteration(None).await.unwrap() }); assert!(!metrics_sync.is_using_same_fork()); diff --git a/bridges/relays/finality/src/headers.rs b/bridges/relays/finality/src/headers.rs index 91f7cd0378ec..5bba4a384562 100644 --- a/bridges/relays/finality/src/headers.rs +++ b/bridges/relays/finality/src/headers.rs @@ -16,10 +16,11 @@ use crate::{ finality_loop::SyncInfo, finality_proofs::FinalityProofsBuf, Error, FinalitySyncPipeline, - SourceClient, SourceHeader, TargetClient, + HeadersToRelay, SourceClient, SourceHeader, TargetClient, }; use bp_header_chain::FinalityProof; +use num_traits::Saturating; use std::cmp::Ordering; /// Unjustified headers container. Ordered by header number. @@ -50,9 +51,13 @@ pub enum JustifiedHeaderSelector { } impl JustifiedHeaderSelector

{ + /// Selects last header with persistent justification, missing from the target and matching + /// the `headers_to_relay` criteria. pub(crate) async fn new, TC: TargetClient

>( source_client: &SC, info: &SyncInfo

, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option, ) -> Result> { let mut unjustified_headers = Vec::new(); let mut maybe_justified_header = None; @@ -70,12 +75,19 @@ impl JustifiedHeaderSelector

{ return Ok(Self::Mandatory(JustifiedHeader { header, proof })) }, (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), - (false, Some(proof)) => { + (false, Some(proof)) + if need_to_relay::

( + info, + headers_to_relay, + free_headers_interval, + &header, + ) => + { log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); unjustified_headers.clear(); maybe_justified_header = Some(JustifiedHeader { header, proof }); }, - (false, None) => { + _ => { unjustified_headers.push(header); }, } @@ -97,6 +109,7 @@ impl JustifiedHeaderSelector

{ }) } + /// Returns selected mandatory header if we have seen one. Otherwise returns `None`. pub fn select_mandatory(self) -> Option> { match self { JustifiedHeaderSelector::Mandatory(header) => Some(header), @@ -104,7 +117,15 @@ impl JustifiedHeaderSelector

{ } } - pub fn select(self, buf: &FinalityProofsBuf<P>) -> Option<JustifiedHeader<P>> { + /// Tries to improve previously selected header using ephemeral + /// justifications stream. + pub fn select( + self, + info: &SyncInfo<P>, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option<P::Number>, + buf: &FinalityProofsBuf<P>
, + ) -> Option<JustifiedHeader<P>> { let (unjustified_headers, maybe_justified_header) = match self { JustifiedHeaderSelector::Mandatory(justified_header) => return Some(justified_header), JustifiedHeaderSelector::Regular(unjustified_headers, justified_header) => @@ -122,7 +143,14 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P>
{ (maybe_finality_proof, maybe_unjustified_header) { match finality_proof.target_header_number().cmp(&unjustified_header.number()) { - Ordering::Equal => { + Ordering::Equal + if need_to_relay::
<P>
( + info, + headers_to_relay, + free_headers_interval, + &unjustified_header, + ) => + { log::trace!( target: "bridge", "Managed to improve selected {} finality proof {:?} to {:?}.", @@ -135,6 +163,10 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P>
{ proof: finality_proof.clone(), }) }, + Ordering::Equal => { + maybe_finality_proof = finality_proofs_iter.next(); + maybe_unjustified_header = unjustified_headers_iter.next(); + }, Ordering::Less => maybe_unjustified_header = unjustified_headers_iter.next(), Ordering::Greater => { maybe_finality_proof = finality_proofs_iter.next(); @@ -152,6 +184,27 @@ impl<P: FinalitySyncPipeline> JustifiedHeaderSelector<P>
{ } } +/// Returns true if we want to relay the given `header`. +fn need_to_relay<P: FinalitySyncPipeline>( + info: &SyncInfo<P>
, + headers_to_relay: HeadersToRelay, + free_headers_interval: Option, + header: &P::Header, +) -> bool { + match headers_to_relay { + HeadersToRelay::All => true, + HeadersToRelay::Mandatory => header.is_mandatory(), + HeadersToRelay::Free => + header.is_mandatory() || + free_headers_interval + .map(|free_headers_interval| { + header.number().saturating_sub(info.best_number_at_target) >= + free_headers_interval + }) + .unwrap_or(false), + } +} + #[cfg(test)] mod tests { use super::*; @@ -159,13 +212,22 @@ mod tests { #[test] fn select_better_recent_finality_proof_works() { + let info = SyncInfo { + best_number_at_source: 10, + best_number_at_target: 5, + is_using_same_fork: true, + }; + // if there are no unjustified headers, nothing is changed let finality_proofs_buf = FinalityProofsBuf::::new(vec![TestFinalityProof(5)]); let justified_header = JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; let selector = JustifiedHeaderSelector::Regular(vec![], justified_header.clone()); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there are no buffered finality proofs, nothing is changed let finality_proofs_buf = FinalityProofsBuf::::new(vec![]); @@ -175,7 +237,10 @@ mod tests { vec![TestSourceHeader(false, 5, 5)], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's no intersection between recent finality proofs and unjustified headers, // nothing is changed @@ -189,7 +254,10 @@ mod tests { vec![TestSourceHeader(false, 9, 9), TestSourceHeader(false, 10, 10)], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's intersection between recent finality proofs and unjustified headers, but there // are no proofs in this intersection, nothing is changed @@ -207,7 +275,10 @@ mod tests { ], justified_header.clone(), ); - assert_eq!(selector.select(&finality_proofs_buf), Some(justified_header)); + assert_eq!( + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), + Some(justified_header) + ); // if there's intersection between recent finality proofs and unjustified headers and // there's a proof in this intersection: @@ -228,11 +299,63 @@ mod tests { justified_header, ); assert_eq!( - selector.select(&finality_proofs_buf), + selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), Some(JustifiedHeader { header: TestSourceHeader(false, 9, 9), proof: TestFinalityProof(9) }) ); + + // when only free headers needs to be relayed and there are no free headers + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(7), + TestFinalityProof(9), + ]); + let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 8, 8), + TestSourceHeader(false, 9, 9), + TestSourceHeader(false, 10, 10), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + None, + ); + + // when only free headers needs to be relayed, mandatory header may be selected + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(6), + TestFinalityProof(9), + ]); 
+ let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 8, 8), + TestSourceHeader(true, 9, 9), + TestSourceHeader(false, 10, 10), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + Some(JustifiedHeader { + header: TestSourceHeader(true, 9, 9), + proof: TestFinalityProof(9) + }) + ); + + // when only free headers needs to be relayed and there is free header + let finality_proofs_buf = FinalityProofsBuf::::new(vec![ + TestFinalityProof(7), + TestFinalityProof(9), + TestFinalityProof(14), + ]); + let selector = JustifiedHeaderSelector::None(vec![ + TestSourceHeader(false, 7, 7), + TestSourceHeader(false, 10, 10), + TestSourceHeader(false, 14, 14), + ]); + assert_eq!( + selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), + Some(JustifiedHeader { + header: TestSourceHeader(false, 14, 14), + proof: TestFinalityProof(14) + }) + ); } } diff --git a/bridges/relays/finality/src/lib.rs b/bridges/relays/finality/src/lib.rs index 3579e68e1ef9..4346f96674b4 100644 --- a/bridges/relays/finality/src/lib.rs +++ b/bridges/relays/finality/src/lib.rs @@ -21,7 +21,9 @@ pub use crate::{ base::{FinalityPipeline, SourceClientBase}, - finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}, + finality_loop::{ + metrics_prefix, run, FinalitySyncParams, HeadersToRelay, SourceClient, TargetClient, + }, finality_proofs::{FinalityProofsBuf, FinalityProofsStream}, sync_loop_metrics::SyncLoopMetrics, }; diff --git a/bridges/relays/finality/src/mock.rs b/bridges/relays/finality/src/mock.rs index e3ec4e4d0d47..69357f71ce27 100644 --- a/bridges/relays/finality/src/mock.rs +++ b/bridges/relays/finality/src/mock.rs @@ -198,10 +198,15 @@ impl TargetClient for TestTargetClient { Ok(data.target_best_block_id) } + async fn free_source_headers_interval(&self) -> Result, TestError> { + Ok(Some(3)) + } + async fn submit_finality_proof( &self, header: TestSourceHeader, proof: TestFinalityProof, + _is_free_execution_expected: bool, ) -> Result { let mut data = self.data.lock(); (self.on_method_call)(&mut data); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 90558ed46138..cf1957c7323b 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -24,6 +24,7 @@ use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use crate::{ cli::{bridge::*, chain_schema::*, PrometheusParams}, finality::SubstrateFinalitySyncPipeline, + HeadersToRelay, }; /// Chain headers relaying params. @@ -33,6 +34,10 @@ pub struct RelayHeadersParams { /// are relayed. #[structopt(long)] only_mandatory_headers: bool, + /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) + /// are relayed. Overrides `only_mandatory_headers`. + #[structopt(long)] + only_free_headers: bool, #[structopt(flatten)] source: SourceConnectionParams, #[structopt(flatten)] @@ -43,11 +48,22 @@ pub struct RelayHeadersParams { prometheus_params: PrometheusParams, } +impl RelayHeadersParams { + fn headers_to_relay(&self) -> HeadersToRelay { + match (self.only_mandatory_headers, self.only_free_headers) { + (_, true) => HeadersToRelay::Free, + (true, false) => HeadersToRelay::Mandatory, + _ => HeadersToRelay::All, + } + } +} + /// Trait used for relaying headers between 2 chains. 
#[async_trait] pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { /// Relay headers. async fn relay_headers(data: RelayHeadersParams) -> anyhow::Result<()> { + let headers_to_relay = data.headers_to_relay(); let source_client = data.source.into_client::().await?; let target_client = data.target.into_client::().await?; let target_transactions_mortality = data.target_sign.target_transactions_mortality; @@ -67,7 +83,7 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { crate::finality::run::( source_client, target_client, - data.only_mandatory_headers, + headers_to_relay, target_transactions_params, metrics_params, ) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index 27e9f1c21ba0..a796df6721b8 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -40,7 +40,7 @@ use crate::{ cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, messages_lane::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, - TaggedAccount, TransactionParams, + HeadersToRelay, TaggedAccount, TransactionParams, }; use bp_messages::LaneId; use bp_runtime::BalanceOf; @@ -61,11 +61,25 @@ pub struct HeadersAndMessagesSharedParams { /// are relayed. #[structopt(long)] pub only_mandatory_headers: bool, + /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) + /// are relayed. Overrides `only_mandatory_headers`. + #[structopt(long)] + pub only_free_headers: bool, #[structopt(flatten)] /// Prometheus metrics params. pub prometheus_params: PrometheusParams, } +impl HeadersAndMessagesSharedParams { + fn headers_to_relay(&self) -> HeadersToRelay { + match (self.only_mandatory_headers, self.only_free_headers) { + (_, true) => HeadersToRelay::Free, + (true, false) => HeadersToRelay::Mandatory, + _ => HeadersToRelay::All, + } + } +} + /// Bridge parameters, shared by all bridge types. 
pub struct Full2WayBridgeCommonParams< Left: ChainWithTransactions + ChainWithRuntimeVersion, @@ -418,6 +432,7 @@ mod tests { shared: HeadersAndMessagesSharedParams { lane: vec![HexLaneId([0x00, 0x00, 0x00, 0x00])], only_mandatory_headers: false, + only_free_headers: false, prometheus_params: PrometheusParams { no_prometheus: false, prometheus_host: "0.0.0.0".into(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 76accfa29050..7f6f40777823 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -180,7 +180,7 @@ where self.left_relay.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); let right_relay_to_left_on_demand_headers = @@ -188,7 +188,7 @@ where self.right_relay.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index b75ac3e60c26..5911fe49df4a 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -171,7 +171,7 @@ where self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); let right_relay_to_left_on_demand_headers = @@ -179,7 +179,7 @@ where self.right_relay.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), Some(self.common.metrics_params.clone()), ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index b397ff50a20a..832df4ae4003 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -152,7 +152,7 @@ where self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); let right_to_left_on_demand_headers = @@ -160,7 +160,7 @@ where self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), - self.common.shared.only_mandatory_headers, + self.common.shared.headers_to_relay(), None, ); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index e5a52349469b..1425233add1e 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ 
b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -43,6 +43,10 @@ pub struct RelayParachainsParams { target: TargetConnectionParams, #[structopt(flatten)] target_sign: TargetSigningParams, + /// If passed, only free headers (those, available at "free" relay chain headers) + /// are relayed. + #[structopt(long)] + only_free_headers: bool, #[structopt(flatten)] prometheus_params: PrometheusParams, } @@ -59,9 +63,9 @@ where { /// Start relaying parachains finality. async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { - let source_client = data.source.into_client::().await?; + let source_chain_client = data.source.into_client::().await?; let source_client = ParachainsSource::::new( - source_client, + source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -69,9 +73,10 @@ where signer: data.target_sign.to_keypair::()?, mortality: data.target_sign.target_transactions_mortality, }; - let target_client = data.target.into_client::().await?; + let target_chain_client = data.target.into_client::().await?; let target_client = ParachainsTarget::::new( - target_client.clone(), + source_chain_client, + target_chain_client, target_transaction_params, ); @@ -83,6 +88,7 @@ where source_client, target_client, metrics_params, + data.only_free_headers, futures::future::pending(), ) .await diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 206f628b143b..a06857ae1d9b 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -25,7 +25,7 @@ use crate::{ use async_trait::async_trait; use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext}; -use finality_relay::{FinalityPipeline, FinalitySyncPipeline}; +use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay}; use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, @@ -115,6 +115,7 @@ pub trait SubmitFinalityProofCallBuilder { fn build_submit_finality_proof_call( header: SyncHeader>, proof: SubstrateFinalityProof
<P>
, + is_free_execution_expected: bool, context: <
<P as SubstrateFinalityPipeline>
::FinalityEngine as Engine>::FinalityVerificationContext, ) -> CallOf; } @@ -142,6 +143,7 @@ where fn build_submit_finality_proof_call( header: SyncHeader>, proof: GrandpaJustification>, + _is_free_execution_expected: bool, _context: JustificationVerificationContext, ) -> CallOf { BridgeGrandpaCall::::submit_finality_proof { @@ -176,6 +178,7 @@ macro_rules! generate_submit_finality_proof_call_builder { <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain > >, + _is_free_execution_expected: bool, _context: bp_header_chain::justification::JustificationVerificationContext, ) -> relay_substrate_client::CallOf< <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain @@ -215,6 +218,7 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain > >, + is_free_execution_expected: bool, context: bp_header_chain::justification::JustificationVerificationContext, ) -> relay_substrate_client::CallOf< <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain @@ -223,7 +227,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { $bridge_grandpa($submit_finality_proof { finality_target: Box::new(header.into_inner()), justification: proof, - current_set_id: context.authority_set_id + current_set_id: context.authority_set_id, + is_free_execution_expected, }) } } @@ -235,15 +240,16 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { pub async fn run( source_client: Client, target_client: Client, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { log::info!( target: "bridge", - "Starting {} -> {} finality proof relay", + "Starting {} -> {} finality proof relay: relaying {:?} headers", P::SourceChain::NAME, P::TargetChain::NAME, + headers_to_relay, ); finality_relay::run( @@ -260,7 +266,7 @@ pub async fn run( P::TargetChain::AVERAGE_BLOCK_INTERVAL, relay_utils::STALL_TIMEOUT, ), - only_mandatory_headers, + headers_to_relay, }, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 18464d523f4f..adbcfe0096d5 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -25,9 +25,10 @@ use crate::{ }; use async_trait::async_trait; +use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, + AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; @@ -103,10 +104,23 @@ impl TargetClient Result>, Self::Error> { + self.client + .typed_state_call( + P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), + (), + Some(self.client.best_header().await?.hash()), + ) + .await + } + async fn submit_finality_proof( &self, header: SyncHeader>, mut proof: SubstrateFinalityProof
<P>
, + is_free_execution_expected: bool, ) -> Result { // verify and runtime module at target chain may require optimized finality proof let context = @@ -115,7 +129,10 @@ impl TargetClient OnDemandHeadersRelay
<P>
{ source_client: Client, target_client: Client, target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, metrics_params: Option, ) -> Self where @@ -94,7 +94,7 @@ impl OnDemandHeadersRelay
<P>
{ source_client, target_client, target_transaction_params, - only_mandatory_headers, + headers_to_relay, required_header_number, metrics_params, ) @@ -191,7 +191,7 @@ impl OnDemandRelay( source_client: Client, target_client: Client, target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, + headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, metrics_params: Option, ) where @@ -346,11 +346,11 @@ async fn background_task( log::info!( target: "bridge", "[{}] Starting on-demand headers relay task\n\t\ - Only mandatory headers: {}\n\t\ + Headers to relay: {:?}\n\t\ Tx mortality: {:?} (~{}m)\n\t\ Stall timeout: {:?}", relay_task_name, - only_mandatory_headers, + headers_to_relay, target_transactions_mortality, stall_timeout.as_secs_f64() / 60.0f64, stall_timeout, @@ -367,7 +367,7 @@ async fn background_task( ), recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, stall_timeout, - only_mandatory_headers, + headers_to_relay, }, metrics_params.clone().unwrap_or_else(MetricsParams::disabled), futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index f67c002bba7f..966bdc310720 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -222,6 +222,7 @@ where proved_relay_block, vec![(para_id, para_hash)], para_proof, + false, )); Ok((proved_parachain_block, calls)) @@ -256,8 +257,11 @@ async fn background_task( let mut parachains_source = ParachainsSource::
<P::ParachainsPipeline>
::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = - ParachainsTarget::
<P::ParachainsPipeline>
::new(target_client.clone(), target_transaction_params.clone()); + let mut parachains_target = ParachainsTarget::
<P::ParachainsPipeline>
::new( + source_relay_client.clone(), + target_client.clone(), + target_transaction_params.clone(), + ); loop { select! { @@ -392,6 +396,8 @@ async fn background_task( parachains_source.clone(), parachains_target.clone(), MetricsParams::disabled(), + // we do not support free parachain headers relay in on-demand relays + false, futures::future::pending(), ) .fuse(), @@ -481,7 +487,7 @@ where let para_header_at_target = best_finalized_peer_header_at_self::< P::TargetChain, P::SourceParachain, - >(target.client(), best_target_block_hash) + >(target.target_client(), best_target_block_hash) .await; // if there are no parachain heads at the target (`NoParachainHeadAtTarget`), we'll need to // submit at least one. Otherwise the pallet will be treated as uninitialized and messages @@ -504,7 +510,7 @@ where let relay_header_at_target = best_finalized_peer_header_at_self::< P::TargetChain, P::SourceRelayChain, - >(target.client(), best_target_block_hash) + >(target.target_client(), best_target_block_hash) .await .map_err(map_target_err)?; diff --git a/bridges/relays/lib-substrate-relay/src/parachains/mod.rs b/bridges/relays/lib-substrate-relay/src/parachains/mod.rs index 722f9b61f9f0..8b128bb770dd 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/mod.rs @@ -71,6 +71,7 @@ pub trait SubmitParachainHeadsCallBuilder: at_relay_block: HeaderIdOf, parachains: Vec<(ParaId, ParaHash)>, parachain_heads_proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> CallOf; } @@ -97,6 +98,7 @@ where at_relay_block: HeaderIdOf, parachains: Vec<(ParaId, ParaHash)>, parachain_heads_proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> CallOf { BridgeParachainsCall::::submit_parachain_heads { at_relay_block: (at_relay_block.0, at_relay_block.1), diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs index 6df7bc0a742a..e10d15b6edf6 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs @@ -24,42 +24,53 @@ use crate::{ }; use async_trait::async_trait; -use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use bp_runtime::HeaderIdProvider; -use codec::Decode; +use bp_parachains::{ + ImportedParaHeadsKeyProvider, ParaInfo, ParaStoredHeaderData, ParasInfoKeyProvider, +}; +use bp_polkadot_core::{ + parachains::{ParaHash, ParaHeadsProof, ParaId}, + BlockNumber as RelayBlockNumber, +}; +use bp_runtime::{ + Chain as ChainBase, HeaderId, HeaderIdProvider, StorageDoubleMapKeyProvider, + StorageMapKeyProvider, +}; use parachains_relay::parachains_loop::TargetClient; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, Client, Error as SubstrateError, HeaderIdOf, - ParachainBase, TransactionEra, TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, Error as SubstrateError, + HeaderIdOf, ParachainBase, RelayChain, TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; -use sp_core::{Bytes, Pair}; +use sp_core::Pair; /// Substrate client as parachain heads source. pub struct ParachainsTarget { - client: Client, + source_client: Client, + target_client: Client, transaction_params: TransactionParams>, } impl ParachainsTarget
<P>
{ /// Creates new parachains target client. pub fn new( - client: Client, + source_client: Client, + target_client: Client, transaction_params: TransactionParams>, ) -> Self { - ParachainsTarget { client, transaction_params } + ParachainsTarget { source_client, target_client, transaction_params } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { - &self.client + pub fn target_client(&self) -> &Client { + &self.target_client } } impl Clone for ParachainsTarget
<P>
{ fn clone(&self) -> Self { ParachainsTarget { - client: self.client.clone(), + source_client: self.source_client.clone(), + target_client: self.target_client.clone(), transaction_params: self.transaction_params.clone(), } } @@ -70,7 +81,9 @@ impl RelayClient for ParachainsTarget
<P>
{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await + self.target_client.reconnect().await?; + self.source_client.reconnect().await?; + Ok(()) } } @@ -79,11 +92,13 @@ impl
<P>
TargetClient<ParachainsPipelineAdapter<P>> for ParachainsTarget<P>
where P: SubstrateParachainsPipeline, AccountIdOf: From< as Pair>::Public>, + P::SourceParachain: ChainBase, + P::SourceRelayChain: ChainBase, { type TransactionTracker = TransactionTracker>; async fn best_block(&self) -> Result, Self::Error> { - let best_header = self.client.best_header().await?; + let best_header = self.target_client.best_header().await?; let best_id = best_header.id(); Ok(best_id) @@ -93,7 +108,7 @@ where &self, at_block: &HeaderIdOf, ) -> Result, Self::Error> { - self.client + self.target_client .typed_state_call::<_, Option>>( P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), @@ -104,23 +119,57 @@ where .unwrap_or(Err(SubstrateError::BridgePalletIsNotInitialized)) } + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, Self::Error> { + self.target_client + .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) + .await + } + async fn parachain_head( &self, at_block: HeaderIdOf, - ) -> Result>, Self::Error> { - let encoded_best_finalized_source_para_block = self - .client - .state_call( - P::SourceParachain::BEST_FINALIZED_HEADER_ID_METHOD.into(), - Bytes(Vec::new()), - Some(at_block.1), - ) - .await?; + ) -> Result< + Option<(HeaderIdOf, HeaderIdOf)>, + Self::Error, + > { + // read best parachain head from the target bridge-parachains pallet + let storage_key = ParasInfoKeyProvider::final_key( + P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, + &P::SourceParachain::PARACHAIN_ID.into(), + ); + let storage_value: Option = + self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + let para_info = match storage_value { + Some(para_info) => para_info, + None => return Ok(None), + }; + + // now we need to get full header ids. For source relay chain it is simple, because we + // are connected + let relay_header_id = self + .source_client + .header_by_number(para_info.best_head_hash.at_relay_block_number) + .await? + .id(); - Ok(Option::>::decode( - &mut &encoded_best_finalized_source_para_block.0[..], - ) - .map_err(SubstrateError::ResponseParseFailed)?) 
+ // for parachain, we need to read from the target chain runtime storage + let storage_key = ImportedParaHeadsKeyProvider::final_key( + P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, + &P::SourceParachain::PARACHAIN_ID.into(), + ¶_info.best_head_hash.head_hash, + ); + let storage_value: Option = + self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; + let para_head_number = match storage_value { + Some(para_head_data) => + para_head_data.decode_parachain_head_data::()?.number, + None => return Ok(None), + }; + + let para_head_id = HeaderId(para_head_number, para_info.best_head_hash.head_hash); + Ok(Some((relay_header_id, para_head_id))) } async fn submit_parachain_head_proof( @@ -128,14 +177,16 @@ where at_relay_block: HeaderIdOf, updated_head_hash: ParaHash, proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> Result { let transaction_params = self.transaction_params.clone(); let call = P::SubmitParachainHeadsCallBuilder::build_submit_parachain_heads_call( at_relay_block, vec![(ParaId(P::SourceParachain::PARACHAIN_ID), updated_head_hash)], proof, + is_free_execution_expected, ); - self.client + self.target_client .submit_and_watch_signed_extrinsic( &transaction_params.signer, move |best_block_id, transaction_nonce| { diff --git a/bridges/relays/parachains/src/parachains_loop.rs b/bridges/relays/parachains/src/parachains_loop.rs index 41ebbf5aaded..55f236eeac1d 100644 --- a/bridges/relays/parachains/src/parachains_loop.rs +++ b/bridges/relays/parachains/src/parachains_loop.rs @@ -25,7 +25,7 @@ use futures::{ future::{FutureExt, Shared}, poll, select_biased, }; -use relay_substrate_client::{Chain, HeaderIdOf, ParachainBase}; +use relay_substrate_client::{BlockNumberOf, Chain, HeaderIdOf, ParachainBase}; use relay_utils::{ metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, TrackedTransactionStatus, TransactionTracker, @@ -96,17 +96,27 @@ pub trait TargetClient: RelayClient { /// Get best block id. async fn best_block(&self) -> Result, Self::Error>; - /// Get best finalized source relay chain block id. + /// Get best finalized source relay chain block id. If `free_source_relay_headers_interval` + /// is `Some(_)`, the returned async fn best_finalized_source_relay_chain_block( &self, at_block: &HeaderIdOf, ) -> Result, Self::Error>; + /// Get free source **relay** headers submission interval, if it is configured in the + /// target runtime. We assume that the target chain will accept parachain header, proved + /// at such relay header for free. + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, Self::Error>; /// Get parachain head id at given block. async fn parachain_head( &self, at_block: HeaderIdOf, - ) -> Result>, Self::Error>; + ) -> Result< + Option<(HeaderIdOf, HeaderIdOf)>, + Self::Error, + >; /// Submit parachain heads proof. async fn submit_parachain_head_proof( @@ -114,6 +124,7 @@ pub trait TargetClient: RelayClient { at_source_block: HeaderIdOf, para_head_hash: ParaHash, proof: ParaHeadsProof, + is_free_execution_expected: bool, ) -> Result; } @@ -133,6 +144,7 @@ pub async fn run( source_client: impl SourceClient
<P>
, target_client: impl TargetClient
<P>
, metrics_params: MetricsParams, + only_free_headers: bool, exit_signal: impl Future + 'static + Send, ) -> Result<(), relay_utils::Error> where @@ -145,7 +157,13 @@ where .expose() .await? .run(metrics_prefix::
<P>
(), move |source_client, target_client, metrics| { - run_until_connection_lost(source_client, target_client, metrics, exit_signal.clone()) + run_until_connection_lost( + source_client, + target_client, + metrics, + only_free_headers, + exit_signal.clone(), + ) }) .await } @@ -155,6 +173,7 @@ async fn run_until_connection_lost( source_client: impl SourceClient
<P>
, target_client: impl TargetClient
<P>
, metrics: Option, + only_free_headers: bool, exit_signal: impl Future + Send, ) -> Result<(), FailedClient> where @@ -166,6 +185,47 @@ where P::TargetChain::AVERAGE_BLOCK_INTERVAL, ); + // free parachain header = header, available (proved) at free relay chain block. Let's + // read interval of free source relay chain blocks from target client + let free_source_relay_headers_interval = if only_free_headers { + let free_source_relay_headers_interval = + target_client.free_source_relay_headers_interval().await.map_err(|e| { + log::warn!( + target: "bridge", + "Failed to read free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + e, + ); + FailedClient::Target + })?; + match free_source_relay_headers_interval { + Some(free_source_relay_headers_interval) if free_source_relay_headers_interval != 0 => { + log::trace!( + target: "bridge", + "Free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + free_source_relay_headers_interval, + ); + free_source_relay_headers_interval + }, + _ => { + log::warn!( + target: "bridge", + "Invalid free {} headers interval at {}: {:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + free_source_relay_headers_interval, + ); + return Err(FailedClient::Target) + }, + } + } else { + // ignore - we don't need it + 0 + }; + let mut submitted_heads_tracker: Option> = None; futures::pin_mut!(exit_signal); @@ -211,7 +271,7 @@ where log::warn!(target: "bridge", "Failed to read best {} block: {:?}", P::SourceRelayChain::NAME, e); FailedClient::Target })?; - let head_at_target = + let (relay_of_head_at_target, head_at_target) = read_head_at_target(&target_client, metrics.as_ref(), &best_target_block).await?; // check if our transaction has been mined @@ -238,9 +298,9 @@ where } } - // we have no active transaction and may need to update heads, but do we have something for - // update? 
- let best_finalized_relay_block = target_client + // in all-headers strategy we'll be submitting para head, available at + // `best_finalized_relay_block_at_target` + let best_finalized_relay_block_at_target = target_client .best_finalized_source_relay_chain_block(&best_target_block) .await .map_err(|e| { @@ -253,21 +313,56 @@ where ); FailedClient::Target })?; + + // ..but if we only need to submit free headers, we need to submit para + // head, available at best free source relay chain header, known to the + // target chain + let prove_at_relay_block = if only_free_headers { + match relay_of_head_at_target { + Some(relay_of_head_at_target) => { + // find last free relay chain header in the range that we are interested in + let scan_range_begin = relay_of_head_at_target.number(); + let scan_range_end = best_finalized_relay_block_at_target.number(); + if scan_range_end.saturating_sub(scan_range_begin) < + free_source_relay_headers_interval + { + // there are no new **free** relay chain headers in the range + log::trace!( + target: "bridge", + "Waiting for new free {} headers at {}: scanned {:?}..={:?}", + P::SourceRelayChain::NAME, + P::TargetChain::NAME, + scan_range_begin, + scan_range_end, + ); + continue; + } + + // we may submit new parachain head for free + best_finalized_relay_block_at_target + }, + None => { + // no parachain head at target => let's submit first one + best_finalized_relay_block_at_target + }, + } + } else { + best_finalized_relay_block_at_target + }; + + // now let's check if we need to update parachain head at all let head_at_source = - read_head_at_source(&source_client, metrics.as_ref(), &best_finalized_relay_block) - .await?; + read_head_at_source(&source_client, metrics.as_ref(), &prove_at_relay_block).await?; let is_update_required = is_update_required::
<P>
( head_at_source, head_at_target, - best_finalized_relay_block, + prove_at_relay_block, best_target_block, ); if is_update_required { - let (head_proof, head_hash) = source_client - .prove_parachain_head(best_finalized_relay_block) - .await - .map_err(|e| { + let (head_proof, head_hash) = + source_client.prove_parachain_head(prove_at_relay_block).await.map_err(|e| { log::warn!( target: "bridge", "Failed to prove {} parachain ParaId({}) heads: {:?}", @@ -283,12 +378,17 @@ where P::SourceRelayChain::NAME, P::SourceParachain::PARACHAIN_ID, P::TargetChain::NAME, - best_finalized_relay_block, + prove_at_relay_block, head_hash, ); let transaction_tracker = target_client - .submit_parachain_head_proof(best_finalized_relay_block, head_hash, head_proof) + .submit_parachain_head_proof( + prove_at_relay_block, + head_hash, + head_proof, + only_free_headers, + ) .await .map_err(|e| { log::warn!( @@ -311,7 +411,7 @@ where fn is_update_required( head_at_source: AvailableHeader>, head_at_target: Option>, - best_finalized_relay_block_at_source: HeaderIdOf, + prove_at_relay_block: HeaderIdOf, best_target_block: HeaderIdOf, ) -> bool where @@ -326,7 +426,7 @@ where P::SourceParachain::PARACHAIN_ID, P::TargetChain::NAME, P::SourceRelayChain::NAME, - best_finalized_relay_block_at_source, + prove_at_relay_block, head_at_source, P::TargetChain::NAME, best_target_block, @@ -413,24 +513,28 @@ async fn read_head_at_source( } } -/// Reads parachain head from the target client. +/// Reads parachain head from the target client. Also returns source relay chain header +/// that has been used to prove that head. async fn read_head_at_target( target_client: &impl TargetClient
<P>
, metrics: Option<&ParachainsLoopMetrics>, at_block: &HeaderIdOf, -) -> Result>, FailedClient> { +) -> Result< + (Option>, Option>), + FailedClient, +> { let para_head_id = target_client.parachain_head(*at_block).await; match para_head_id { - Ok(Some(para_head_id)) => { + Ok(Some((relay_header_id, para_head_id))) => { if let Some(metrics) = metrics { metrics.update_best_parachain_block_at_target( ParaId(P::SourceParachain::PARACHAIN_ID), para_head_id.number(), ); } - Ok(Some(para_head_id)) + Ok((Some(relay_header_id), Some(para_head_id))) }, - Ok(None) => Ok(None), + Ok(None) => Ok((None, None)), Err(e) => { log::warn!( target: "bridge", @@ -543,6 +647,7 @@ mod tests { use relay_substrate_client::test_chain::{TestChain, TestParachain}; use relay_utils::{HeaderId, MaybeConnectionError}; use sp_core::H256; + use std::collections::HashMap; const PARA_10_HASH: ParaHash = H256([10u8; 32]); const PARA_20_HASH: ParaHash = H256([20u8; 32]); @@ -590,14 +695,21 @@ mod tests { #[derive(Clone, Debug)] struct TestClientData { source_sync_status: Result, - source_head: Result>, TestError>, + source_head: HashMap< + BlockNumberOf, + Result>, TestError>, + >, source_proof: Result<(), TestError>, + target_free_source_relay_headers_interval: + Result>, TestError>, target_best_block: Result, TestError>, target_best_finalized_source_block: Result, TestError>, - target_head: Result>, TestError>, + #[allow(clippy::type_complexity)] + target_head: Result, HeaderIdOf)>, TestError>, target_submit_result: Result<(), TestError>, + submitted_proof_at_source_relay_block: Option>, exit_signal_sender: Option>>, } @@ -605,14 +717,18 @@ mod tests { pub fn minimal() -> Self { TestClientData { source_sync_status: Ok(true), - source_head: Ok(AvailableHeader::Available(HeaderId(0, PARA_20_HASH))), + source_head: vec![(0, Ok(AvailableHeader::Available(HeaderId(0, PARA_20_HASH))))] + .into_iter() + .collect(), source_proof: Ok(()), + target_free_source_relay_headers_interval: Ok(None), target_best_block: Ok(HeaderId(0, Default::default())), target_best_finalized_source_block: Ok(HeaderId(0, Default::default())), target_head: Ok(None), target_submit_result: Ok(()), + submitted_proof_at_source_relay_block: None, exit_signal_sender: None, } } @@ -649,16 +765,24 @@ mod tests { async fn parachain_head( &self, - _at_block: HeaderIdOf, + at_block: HeaderIdOf, ) -> Result>, TestError> { - self.data.lock().await.source_head.clone() + self.data + .lock() + .await + .source_head + .get(&at_block.0) + .expect(&format!("SourceClient::parachain_head({})", at_block.0)) + .clone() } async fn prove_parachain_head( &self, - _at_block: HeaderIdOf, + at_block: HeaderIdOf, ) -> Result<(ParaHeadsProof, ParaHash), TestError> { - let head = *self.data.lock().await.source_head.clone()?.as_available().unwrap(); + let head_result = + SourceClient::::parachain_head(self, at_block).await?; + let head = head_result.as_available().unwrap(); let storage_proof = vec![head.hash().encode()]; let proof = (ParaHeadsProof { storage_proof }, head.hash()); self.data.lock().await.source_proof.clone().map(|_| proof) @@ -680,21 +804,29 @@ mod tests { self.data.lock().await.target_best_finalized_source_block.clone() } + async fn free_source_relay_headers_interval( + &self, + ) -> Result>, TestError> { + self.data.lock().await.target_free_source_relay_headers_interval.clone() + } + async fn parachain_head( &self, _at_block: HeaderIdOf, - ) -> Result>, TestError> { + ) -> Result, HeaderIdOf)>, TestError> { self.data.lock().await.target_head.clone() } async fn 
submit_parachain_head_proof( &self, - _at_source_block: HeaderIdOf, + at_source_block: HeaderIdOf, _updated_parachain_head: ParaHash, _proof: ParaHeadsProof, + _is_free_execution_expected: bool, ) -> Result { let mut data = self.data.lock().await; data.target_submit_result.clone()?; + data.submitted_proof_at_source_relay_block = Some(at_source_block); if let Some(mut exit_signal_sender) = data.exit_signal_sender.take() { exit_signal_sender.send(()).await.unwrap(); @@ -715,6 +847,7 @@ mod tests { TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -731,6 +864,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -747,6 +881,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -763,6 +898,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -772,13 +908,14 @@ mod tests { #[test] fn when_source_client_fails_to_read_heads() { let mut test_source_client = TestClientData::minimal(); - test_source_client.source_head = Err(TestError::Error); + test_source_client.source_head.insert(0, Err(TestError::Error)); assert_eq!( async_std::task::block_on(run_until_connection_lost( TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -795,6 +932,7 @@ mod tests { TestClient::from(test_source_client), TestClient::from(TestClientData::minimal()), None, + false, futures::future::pending(), )), Err(FailedClient::Source), @@ -811,6 +949,7 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(test_target_client), None, + false, futures::future::pending(), )), Err(FailedClient::Target), @@ -825,12 +964,108 @@ mod tests { TestClient::from(TestClientData::minimal()), TestClient::from(TestClientData::with_exit_signal_sender(exit_signal_sender)), None, + false, exit_signal.into_future().map(|(_, _)| ()), )), Ok(()), ); } + #[async_std::test] + async fn free_headers_are_relayed() { + // prepare following case: + // 1) best source relay at target: 95 + // 2) best source parachain at target: 5 at relay 50 + // 3) free headers interval: 10 + // 4) at source relay chain block 90 source parachain block is 9 + // + + // 5) best finalized source relay chain block is 95 + // 6) at source relay chain block 95 source parachain block is 42 + // => + // parachain block 42 would have been relayed, because 95 - 50 > 10 + let (exit_signal_sender, exit_signal) = futures::channel::mpsc::unbounded(); + let clients_data = TestClientData { + source_sync_status: Ok(true), + source_head: vec![ + (90, Ok(AvailableHeader::Available(HeaderId(9, [9u8; 32].into())))), + (95, Ok(AvailableHeader::Available(HeaderId(42, [42u8; 32].into())))), + ] + .into_iter() + .collect(), + source_proof: Ok(()), + + target_free_source_relay_headers_interval: Ok(Some(10)), + target_best_block: Ok(HeaderId(200, [200u8; 32].into())), + target_best_finalized_source_block: Ok(HeaderId(95, [95u8; 32].into())), + target_head: Ok(Some((HeaderId(50, [50u8; 32].into()), HeaderId(5, [5u8; 32].into())))), + target_submit_result: Ok(()), + + submitted_proof_at_source_relay_block: 
None, + exit_signal_sender: Some(Box::new(exit_signal_sender)), + }; + + let source_client = TestClient::from(clients_data.clone()); + let target_client = TestClient::from(clients_data); + assert_eq!( + run_until_connection_lost( + source_client, + target_client.clone(), + None, + true, + exit_signal.into_future().map(|(_, _)| ()), + ) + .await, + Ok(()), + ); + + assert_eq!( + target_client + .data + .lock() + .await + .submitted_proof_at_source_relay_block + .map(|id| id.0), + Some(95) + ); + + // now source relay block chain 104 is mined with parachain head #84 + // => since 104 - 95 < 10, there are no free headers + // => nothing is submitted + let mut clients_data: TestClientData = target_client.data.lock().await.clone(); + clients_data + .source_head + .insert(104, Ok(AvailableHeader::Available(HeaderId(84, [84u8; 32].into())))); + clients_data.target_best_finalized_source_block = Ok(HeaderId(104, [104u8; 32].into())); + clients_data.target_head = + Ok(Some((HeaderId(95, [95u8; 32].into()), HeaderId(42, [42u8; 32].into())))); + clients_data.target_best_block = Ok(HeaderId(255, [255u8; 32].into())); + clients_data.exit_signal_sender = None; + + let source_client = TestClient::from(clients_data.clone()); + let target_client = TestClient::from(clients_data); + assert_eq!( + run_until_connection_lost( + source_client, + target_client.clone(), + None, + true, + async_std::task::sleep(std::time::Duration::from_millis(100)), + ) + .await, + Ok(()), + ); + + assert_eq!( + target_client + .data + .lock() + .await + .submitted_proof_at_source_relay_block + .map(|id| id.0), + Some(95) + ); + } + fn test_tx_tracker() -> SubmittedHeadsTracker { SubmittedHeadsTracker::new( AvailableHeader::Available(HeaderId(20, PARA_20_HASH)), diff --git a/prdoc/pr_4157.prdoc b/prdoc/pr_4157.prdoc new file mode 100644 index 000000000000..783eaa2dd427 --- /dev/null +++ b/prdoc/pr_4157.prdoc @@ -0,0 +1,29 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Bridge: added free headers submission support to the substrate-relay" + +doc: + - audience: Node Dev + description: | + Bridge finality and parachains relayer now supports mode, where it only submits some headers + for free. There's a setting in a runtime configuration, which introduces this "free header" + concept. Submitting such header is considered a common good deed, so it is free for relayers. 
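For reference, the selection rule behind this mode fits in a few lines. The sketch below is illustrative only: `is_free_header` and its arguments are made-up names, with `interval` standing in for the runtime's free-headers setting; it mirrors the `need_to_relay` helper added in this patch.

```rust
// A minimal, self-contained sketch of the free-headers selection rule.
fn is_free_header(
    best_at_target: u32,
    candidate: u32,
    is_mandatory: bool,
    interval: Option<u32>,
) -> bool {
    // Mandatory headers are always free; otherwise the candidate must be
    // at least `interval` blocks ahead of the best header the target knows.
    is_mandatory ||
        interval.map_or(false, |interval| {
            candidate.saturating_sub(best_at_target) >= interval
        })
}

fn main() {
    // With interval 10 and best-at-target 50: header 59 is not free yet,
    // header 60 is, and a mandatory header is always free.
    assert!(!is_free_header(50, 59, false, Some(10)));
    assert!(is_free_header(50, 60, false, Some(10)));
    assert!(is_free_header(50, 51, true, Some(10)));
}
```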
+ +crates: + - name: bp-bridge-hub-kusama + bump: major + - name: bp-bridge-hub-polkadot + bump: major + - name: bp-bridge-hub-rococo + bump: major + - name: bp-bridge-hub-westend + bump: major + - name: relay-substrate-client + bump: major + - name: finality-relay + bump: major + - name: substrate-relay-helper + bump: major + - name: parachains-relay + bump: major From b801d001e812778b1547352468d5f243b7070994 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Thu, 25 Apr 2024 10:47:46 +0200 Subject: [PATCH 54/74] Contracts: Stabilize XCM host fns (#4213) See https://github.com/paritytech/ink/pull/1912 https://github.com/paritytech/ink-docs/pull/338 --- prdoc/pr_4213.prdoc | 11 +++++++++++ substrate/frame/contracts/src/lib.rs | 4 ++-- substrate/frame/contracts/src/wasm/runtime.rs | 2 -- 3 files changed, 13 insertions(+), 4 deletions(-) create mode 100644 prdoc/pr_4213.prdoc diff --git a/prdoc/pr_4213.prdoc b/prdoc/pr_4213.prdoc new file mode 100644 index 000000000000..ce7eb65969b0 --- /dev/null +++ b/prdoc/pr_4213.prdoc @@ -0,0 +1,11 @@ +title: "[pallet-contracts] stabilize xcm_send and xcm_execute" + +doc: + - audience: Runtime Dev + description: | + `xcm_send` and `xcm_execute` are currently marked as unstable. This PR stabilizes them. +crates: +- name: pallet-contracts + bump: major + + diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 20cf7d1651cc..0045d72141c9 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -223,14 +223,14 @@ pub struct Environment { pub struct ApiVersion(u16); impl Default for ApiVersion { fn default() -> Self { - Self(2) + Self(3) } } #[test] fn api_version_is_up_to_date() { assert_eq!( - 109, + 111, crate::wasm::STABLE_API_COUNT, "Stable API count has changed. Bump the returned value of ApiVersion::default() and update the test." ); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 160dfa0d2f36..52ceda99edb7 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -2104,7 +2104,6 @@ pub mod env { /// Execute an XCM program locally, using the contract's address as the origin. /// See [`pallet_contracts_uapi::HostFn::execute_xcm`]. - #[unstable] fn xcm_execute( ctx: _, memory: _, @@ -2143,7 +2142,6 @@ pub mod env { /// Send an XCM program from the contract to the specified destination. /// See [`pallet_contracts_uapi::HostFn::send_xcm`]. - #[unstable] fn xcm_send( ctx: _, memory: _, From 077041788070eddc6f3c1043b9cb6146585b1469 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 25 Apr 2024 12:01:21 +0300 Subject: [PATCH 55/74] [XCM] Treat recursion limit error as transient in the MQ (#4202) Changes: - Add new error variant `ProcessMessageError::StackLimitReached` and treat XCM error `ExceedsStackLimit` as such. 
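For downstream implementors, the only visible change is the new variant. Below is a minimal sketch of a custom `ProcessMessage` implementation that surfaces stack exhaustion correctly; the processor type and its origin are hypothetical, and the import paths assume the usual `frame_support`/`sp_weights` locations.

```rust
use frame_support::traits::{ProcessMessage, ProcessMessageError};
use sp_weights::WeightMeter;

// Hypothetical processor type, for illustration only.
pub struct MyProcessor;

impl ProcessMessage for MyProcessor {
    type Origin = u32;

    fn process_message(
        _message: &[u8],
        _origin: Self::Origin,
        _meter: &mut WeightMeter,
        _id: &mut [u8; 32],
    ) -> Result<bool, ProcessMessageError> {
        // Surface stack exhaustion as `StackLimitReached` instead of a
        // permanent error: the queue keeps the message and may retry it
        // later, e.g. when it is serviced again from the top level.
        Err(ProcessMessageError::StackLimitReached)
    }
}
```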
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Branislav Kontur --- .../xcm-builder/src/process_xcm_message.rs | 46 +++++++- polkadot/xcm/xcm-executor/src/lib.rs | 7 ++ prdoc/pr_4202.prdoc | 16 +++ substrate/frame/message-queue/src/lib.rs | 16 ++- substrate/frame/message-queue/src/mock.rs | 3 + substrate/frame/message-queue/src/tests.rs | 101 +++++++++++++++++- .../frame/support/src/traits/messages.rs | 4 + 7 files changed, 187 insertions(+), 6 deletions(-) create mode 100644 prdoc/pr_4202.prdoc diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index bcf91d8e68c3..7760274f6e24 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -102,7 +102,12 @@ impl< target: LOG_TARGET, "XCM message execution error: {error:?}", ); - (required, Err(ProcessMessageError::Unsupported)) + let error = match error { + xcm::latest::Error::ExceedsStackLimit => ProcessMessageError::StackLimitReached, + _ => ProcessMessageError::Unsupported, + }; + + (required, Err(error)) }, }; meter.consume(consumed); @@ -148,6 +153,45 @@ mod tests { } } + #[test] + fn process_message_exceeds_limits_fails() { + struct MockedExecutor; + impl ExecuteXcm<()> for MockedExecutor { + type Prepared = xcm_executor::WeighedMessage<()>; + fn prepare( + message: xcm::latest::Xcm<()>, + ) -> core::result::Result> { + Ok(xcm_executor::WeighedMessage::new(Weight::zero(), message)) + } + fn execute( + _: impl Into, + _: Self::Prepared, + _: &mut XcmHash, + _: Weight, + ) -> Outcome { + Outcome::Error { error: xcm::latest::Error::ExceedsStackLimit } + } + fn charge_fees(_location: impl Into, _fees: Assets) -> xcm::latest::Result { + unreachable!() + } + } + + type Processor = ProcessXcmMessage; + + let xcm = VersionedXcm::V4(xcm::latest::Xcm::<()>(vec![ + xcm::latest::Instruction::<()>::ClearOrigin, + ])); + assert_err!( + Processor::process_message( + &xcm.encode(), + ORIGIN, + &mut WeightMeter::new(), + &mut [0; 32] + ), + ProcessMessageError::StackLimitReached, + ); + } + #[test] fn process_message_overweight_fails() { for msg in [v3_xcm(true), v3_xcm(false), v3_xcm(false), v2_xcm(false)] { diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e673a46c4ac6..a7052328da00 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -182,6 +182,13 @@ impl PreparedMessage for WeighedMessage { } } +#[cfg(any(test, feature = "std"))] +impl WeighedMessage { + pub fn new(weight: Weight, message: Xcm) -> Self { + Self(weight, message) + } +} + impl ExecuteXcm for XcmExecutor { type Prepared = WeighedMessage; fn prepare( diff --git a/prdoc/pr_4202.prdoc b/prdoc/pr_4202.prdoc new file mode 100644 index 000000000000..6469c3c78407 --- /dev/null +++ b/prdoc/pr_4202.prdoc @@ -0,0 +1,16 @@ +title: "Treat XCM ExceedsStackLimit errors as transient in the MQ pallet" + +doc: + - audience: Runtime User + description: | + Fixes an issue where the MessageQueue can incorrectly assume that a message will permanently fail to process and disallow retrial of it. 
+ +crates: + - name: frame-support + bump: major + - name: pallet-message-queue + bump: patch + - name: staging-xcm-builder + bump: patch + - name: staging-xcm-executor + bump: patch diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index ec85c785f79e..ef3420d21be5 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -765,6 +765,13 @@ enum MessageExecutionStatus { Processed, /// The message was processed and resulted in a, possibly permanent, error. Unprocessable { permanent: bool }, + /// The stack depth limit was reached. + /// + /// We cannot just return `Unprocessable` in this case, because the processability of the + /// message depends on how the function was called. This may be a permanent error if it was + /// called by a top-level function, or a transient error if it was already called in a nested + /// function. + StackLimitReached, } impl Pallet { @@ -984,7 +991,8 @@ impl Pallet { // additional overweight event being deposited. ) { Overweight | InsufficientWeight => Err(Error::::InsufficientWeight), - Unprocessable { permanent: false } => Err(Error::::TemporarilyUnprocessable), + StackLimitReached | Unprocessable { permanent: false } => + Err(Error::::TemporarilyUnprocessable), Unprocessable { permanent: true } | Processed => { page.note_processed_at_pos(pos); book_state.message_count.saturating_dec(); @@ -1250,7 +1258,7 @@ impl Pallet { let is_processed = match res { InsufficientWeight => return ItemExecutionStatus::Bailed, Unprocessable { permanent: false } => return ItemExecutionStatus::NoProgress, - Processed | Unprocessable { permanent: true } => true, + Processed | Unprocessable { permanent: true } | StackLimitReached => true, Overweight => false, }; @@ -1461,6 +1469,10 @@ impl Pallet { Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); MessageExecutionStatus::Unprocessable { permanent: true } }, + Err(error @ StackLimitReached) => { + Self::deposit_event(Event::::ProcessingFailed { id: id.into(), origin, error }); + MessageExecutionStatus::StackLimitReached + }, Ok(success) => { // Success let weight_used = meter.consumed().saturating_sub(prev_consumed); diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 1281de6b0a66..66a242d5a18f 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -198,6 +198,7 @@ impl ProcessMessage for RecordingMessageProcessor { parameter_types! { pub static Callback: Box = Box::new(|_, _| {}); + pub static IgnoreStackOvError: bool = false; } /// Processed a mocked message. 
Messages that end with `badformat`, `corrupt`, `unsupported` or @@ -216,6 +217,8 @@ fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessM Err(ProcessMessageError::Unsupported) } else if msg.ends_with("yield") { Err(ProcessMessageError::Yield) + } else if msg.ends_with("stacklimitreached") && !IgnoreStackOvError::get() { + Err(ProcessMessageError::StackLimitReached) } else { Ok(()) } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index d6788847d571..e89fdb8b3208 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -174,9 +174,10 @@ fn service_queues_failing_messages_works() { MessageQueue::enqueue_message(msg("badformat"), Here); MessageQueue::enqueue_message(msg("corrupt"), Here); MessageQueue::enqueue_message(msg("unsupported"), Here); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); MessageQueue::enqueue_message(msg("yield"), Here); // Starts with four pages. - assert_pages(&[0, 1, 2, 3]); + assert_pages(&[0, 1, 2, 3, 4]); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); assert_last_event::( @@ -206,9 +207,9 @@ fn service_queues_failing_messages_works() { .into(), ); assert_eq!(MessageQueue::service_queues(1.into_weight()), 1.into_weight()); - assert_eq!(System::events().len(), 3); + assert_eq!(System::events().len(), 4); // Last page with the `yield` stays in. - assert_pages(&[3]); + assert_pages(&[4]); }); } @@ -1880,3 +1881,97 @@ fn process_enqueued_on_idle_requires_enough_weight() { assert_eq!(MessagesProcessed::take(), vec![]); }) } + +/// A message that reports `StackLimitReached` will not be put into the overweight queue when +/// executed from the top level. +#[test] +fn process_discards_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + + MessageQueue::service_queues(10.into_weight()); + + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + + assert!(MessagesProcessed::take().is_empty()); + // Message is gone and not overweight: + assert_pages(&[]); + }); +} + +/// A message that reports `StackLimitReached` will stay in the overweight queue when it is executed +/// by `execute_overweight`. +#[test] +fn execute_overweight_keeps_stack_ov_message() { + use MessageOrigin::*; + build_and_execute::(|| { + // We need to create a mocked message that first reports insufficient weight, and then + // `StackLimitReached`: + IgnoreStackOvError::set(true); + MessageQueue::enqueue_message(msg("stacklimitreached"), Here); + MessageQueue::service_queues(0.into_weight()); + + assert_last_event::( + Event::OverweightEnqueued { + id: blake2_256(b"stacklimitreached"), + origin: MessageOrigin::Here, + message_index: 0, + page_index: 0, + } + .into(), + ); + // Does not count as 'processed': + assert!(MessagesProcessed::take().is_empty()); + assert_pages(&[0]); + + // Now let it return `StackLimitReached`. Note that this case would normally not happen, + // since we assume that the top-level execution is the one with the most remaining stack + // depth. + IgnoreStackOvError::set(false); + // Ensure that trying to execute the message does not change any state (besides events). 
+ System::reset_events(); + let storage_noop = StorageNoopGuard::new(); + assert_eq!( + ::execute_overweight(3.into_weight(), (Here, 0, 0)), + Err(ExecuteOverweightError::Other) + ); + assert_last_event::( + Event::ProcessingFailed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + error: ProcessMessageError::StackLimitReached, + } + .into(), + ); + System::reset_events(); + drop(storage_noop); + + // Now let's process it normally: + IgnoreStackOvError::set(true); + assert_eq!( + ::execute_overweight(1.into_weight(), (Here, 0, 0)) + .unwrap(), + 1.into_weight() + ); + + assert_last_event::( + Event::Processed { + id: blake2_256(b"stacklimitreached").into(), + origin: MessageOrigin::Here, + weight_used: 1.into_weight(), + success: true, + } + .into(), + ); + assert_pages(&[]); + System::reset_events(); + }); +} diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index f3d893bcc1d8..2eec606b6d18 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -46,6 +46,8 @@ pub enum ProcessMessageError { /// the case that a queue is re-serviced within the same block after *yielding*. A queue is /// not required to *yield* again when it is being re-serviced withing the same block. Yield, + /// The message could not be processed for reaching the stack depth limit. + StackLimitReached, } /// Can process messages from a specific origin. @@ -96,6 +98,8 @@ pub trait ServiceQueues { /// - `weight_limit`: The maximum amount of dynamic weight that this call can use. /// /// Returns the dynamic weight used by this call; is never greater than `weight_limit`. + /// Should only be called in top-level runtime entry points like `on_initialize` or `on_idle`. + /// Otherwise, stack depth limit errors may be miss-handled. fn service_queues(weight_limit: Weight) -> Weight; /// Executes a message that could not be executed by [`Self::service_queues()`] because it was From 239a23d9cc712aab8c0a87eab7e558e5a149fd42 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Thu, 25 Apr 2024 13:11:07 +0300 Subject: [PATCH 56/74] Fix polkadot parachains not producing blocks until next session (#4269) ... a few sessions too late :(, this already happened on polkadot, so as of now there are no known relay-chains without async backing enabled in runtime, but let's fix it in case someone else wants to repeat our steps. Fixes: https://github.com/paritytech/polkadot-sdk/issues/4226 --------- Signed-off-by: Alexandru Gheorghe --- .../node/network/statement-distribution/src/v2/mod.rs | 11 ++++++++++- .../statement-distribution/src/v2/tests/mod.rs | 6 ++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 68caa5f0e700..118e34e92063 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -826,7 +826,16 @@ pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { // clean up sessions based on everything remaining. 
let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect(); state.per_session.retain(|s, _| sessions.contains(s)); - state.unused_topologies.retain(|s, _| sessions.contains(s)); + + let last_session_index = state.unused_topologies.keys().max().copied(); + // Do not clean-up the last saved toplogy unless we moved to the next session + // This is needed because handle_deactive_leaves, gets also called when + // prospective_parachains APIs are not present, so we would actually remove + // the topology without using it because `per_relay_parent` is empty until + // prospective_parachains gets enabled + state + .unused_topologies + .retain(|s, _| sessions.contains(s) || last_session_index == Some(*s)); } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 8dda7219cd12..3d987d3fc433 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -509,6 +509,12 @@ async fn setup_test_and_connect_peers( // Send gossip topology and activate leaf. if send_topology_before_leaf { send_new_topology(overseer, state.make_dummy_topology()).await; + // Send cleaning up of a leaf to make sure it does not clear the save topology as well. + overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::stop_work(Hash::random()), + ))) + .await; activate_leaf(overseer, &test_leaf, &state, true, vec![]).await; } else { activate_leaf(overseer, &test_leaf, &state, true, vec![]).await; From c26cf3f6f2d2b7f7783703308ece440c338459f8 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Thu, 25 Apr 2024 12:16:12 +0200 Subject: [PATCH 57/74] Do not re-prepare PVFs if not needed (#4211) Currently, PVFs are re-prepared if any execution environment parameter changes. As we've recently seen on Kusama and Polkadot, that may lead to a severe finality lag because every validator has to re-prepare every PVF. That cannot be avoided altogether; however, we could cease re-preparing PVFs when a change in the execution environment can't lead to a change in the artifact itself. For example, it's clear that changing the execution timeout cannot affect the artifact. In this PR, I'm introducing a separate hash for the subset of execution environment parameters that changes only if a preparation-related parameter changes. It introduces some minor code duplication, but without that, the scope of changes would be much bigger. TODO: - [x] Add a test to ensure the artifact is not re-prepared if non-preparation-related parameter is changed - [x] Add a test to ensure the artifact is re-prepared if a preparation-related parameter is changed - [x] Add comments, warnings, and, possibly, a test to ensure a new parameter ever added to the executor environment parameters will be evaluated by the author of changes with respect to its artifact preparation impact and added to the new hash preimage if needed. 
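The two-hash scheme described above is easiest to see in isolation. Below is a minimal, self-contained sketch of the idea: partition the executor parameters into preparation-affecting and execution-only ones, and key the artifact cache on a hash of the former subset only. The names (`Param`, `affects_preparation`, `prep_hash`) are illustrative, not the actual `polkadot-primitives` API, and `DefaultHasher` stands in for the SCALE-encode-then-`BlakeTwo256` hashing used by the real code.

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative subset of executor parameters (not the real `ExecutorParam` enum).
#[derive(Hash)]
enum Param {
	// Only limits execution, so it cannot change the compiled artifact.
	ExecTimeoutMs(u64),
	// These influence what the preparation step produces.
	StackLogicalMax(u32),
	PrepTimeoutMs(u64),
}

impl Param {
	// Whether changing this parameter can change the prepared artifact.
	fn affects_preparation(&self) -> bool {
		match self {
			Param::ExecTimeoutMs(_) => false,
			Param::StackLogicalMax(_) | Param::PrepTimeoutMs(_) => true,
		}
	}
}

// Hash only the preparation-relevant subset of the parameters.
fn prep_hash(params: &[Param]) -> u64 {
	let mut hasher = DefaultHasher::new();
	b"prep".hash(&mut hasher); // domain separation, as in the real `prep_hash()`
	for param in params.iter().filter(|p| p.affects_preparation()) {
		param.hash(&mut hasher);
	}
	hasher.finish()
}

fn main() {
	let old = [Param::ExecTimeoutMs(2_000), Param::StackLogicalMax(65_536), Param::PrepTimeoutMs(60_000)];
	let new = [Param::ExecTimeoutMs(2_500), Param::StackLogicalMax(65_536), Param::PrepTimeoutMs(60_000)];
	// Same preparation hash => the cached artifact is reused, no re-preparation.
	assert_eq!(prep_hash(&old), prep_hash(&new));
}
```

Keyed this way, bumping an execution timeout leaves the artifact ID untouched, while a change to, e.g., the stack limit still forces a rebuild.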
Closes #4132 --- polkadot/node/core/pvf/src/artifacts.rs | 19 ++-- polkadot/node/core/pvf/tests/it/main.rs | 72 +++++++++++++- polkadot/primitives/src/lib.rs | 2 +- polkadot/primitives/src/v7/executor_params.rs | 99 +++++++++++++++++++ polkadot/primitives/src/v7/mod.rs | 4 +- prdoc/pr_4211.prdoc | 15 +++ 6 files changed, 201 insertions(+), 10 deletions(-) create mode 100644 prdoc/pr_4211.prdoc diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 6288755526d4..a3a48b61acb1 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -58,7 +58,7 @@ use crate::{host::PrecheckResultSender, worker_interface::WORKER_DIR_PREFIX}; use always_assert::always; use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; use polkadot_parachain_primitives::primitives::ValidationCodeHash; -use polkadot_primitives::ExecutorParamsHash; +use polkadot_primitives::ExecutorParamsPrepHash; use std::{ collections::HashMap, fs, @@ -85,22 +85,27 @@ pub fn generate_artifact_path(cache_path: &Path) -> PathBuf { artifact_path } -/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. +/// Identifier of an artifact. Encodes a code hash of the PVF and a hash of preparation-related +/// executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { pub(crate) code_hash: ValidationCodeHash, - pub(crate) executor_params_hash: ExecutorParamsHash, + pub(crate) executor_params_prep_hash: ExecutorParamsPrepHash, } impl ArtifactId { /// Creates a new artifact ID with the given hash. - pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { - Self { code_hash, executor_params_hash } + pub fn new( + code_hash: ValidationCodeHash, + executor_params_prep_hash: ExecutorParamsPrepHash, + ) -> Self { + Self { code_hash, executor_params_prep_hash } } - /// Returns an artifact ID that corresponds to the PVF with given executor params. + /// Returns an artifact ID that corresponds to the PVF with given preparation-related + /// executor parameters. 
pub fn from_pvf_prep_data(pvf: &PvfPrepData) -> Self { - Self::new(pvf.code_hash(), pvf.executor_params().hash()) + Self::new(pvf.code_hash(), pvf.executor_params().prep_hash()) } } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 56cc681aff38..6961b93832ab 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -26,7 +26,7 @@ use polkadot_node_core_pvf::{ ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; -use polkadot_primitives::{ExecutorParam, ExecutorParams}; +use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use std::{io::Write, time::Duration}; use tokio::sync::Mutex; @@ -559,3 +559,73 @@ async fn nonexistent_cache_dir() { .await .unwrap(); } + +// Checks the the artifact is not re-prepared when the executor environment parameters change +// in a way not affecting the preparation +#[tokio::test] +async fn artifact_does_not_reprepare_on_non_meaningful_exec_parameter_change() { + let host = TestHost::new_with_config(|cfg| { + cfg.prepare_workers_hard_max_num = 1; + }) + .await; + let cache_dir = host.cache_dir.path(); + + let set1 = ExecutorParams::default(); + let set2 = + ExecutorParams::from(&[ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2500)][..]); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + + let md1 = { + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + std::fs::metadata(artifact_path.path()).unwrap() + }; + + // FS times are not monotonical so we wait 2 secs here to be sure that the creation time of the + // second attifact will be different + tokio::time::sleep(Duration::from_secs(2)).await; + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + + let md2 = { + let mut cache_dir: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + assert_eq!(cache_dir.len(), 2); + let mut artifact_path = cache_dir.pop().unwrap().unwrap(); + if artifact_path.path().is_dir() { + artifact_path = cache_dir.pop().unwrap().unwrap(); + } + std::fs::metadata(artifact_path.path()).unwrap() + }; + + assert_eq!(md1.created().unwrap(), md2.created().unwrap()); +} + +// Checks if the artifact is re-prepared if the re-preparation is needed by the nature of +// the execution environment parameters change +#[tokio::test] +async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() { + let host = TestHost::new_with_config(|cfg| { + cfg.prepare_workers_hard_max_num = 1; + }) + .await; + let cache_dir = host.cache_dir.path(); + + let set1 = ExecutorParams::default(); + let set2 = + ExecutorParams::from(&[ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 60000)][..]); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set1).await.unwrap(); + let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + + assert_eq!(cache_dir_contents.len(), 2); + + let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), set2).await.unwrap(); + let cache_dir_contents: Vec<_> = std::fs::read_dir(cache_dir).unwrap().collect(); + + assert_eq!(cache_dir_contents.len(), 3); // new artifact has been added +} diff --git a/polkadot/primitives/src/lib.rs 
b/polkadot/primitives/src/lib.rs index d4eeb3cc3d29..01f393086a66 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -44,7 +44,7 @@ pub use v7::{ CandidateReceipt, CheckedDisputeStatementSet, CheckedMultiDisputeStatementSet, CollatorId, CollatorSignature, CommittedCandidateReceipt, CompactStatement, ConsensusLog, CoreIndex, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, DownwardMessage, EncodeAs, - ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, ExplicitDisputeStatement, GroupIndex, GroupRotationInfo, Hash, HashT, HeadData, Header, HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, NodeFeatures, diff --git a/polkadot/primitives/src/v7/executor_params.rs b/polkadot/primitives/src/v7/executor_params.rs index 1e19f3b23fec..918a7f17a7e3 100644 --- a/polkadot/primitives/src/v7/executor_params.rs +++ b/polkadot/primitives/src/v7/executor_params.rs @@ -152,13 +152,42 @@ impl sp_std::fmt::LowerHex for ExecutorParamsHash { } } +/// Unit type wrapper around [`type@Hash`] that represents a hash of preparation-related +/// executor parameters. +/// +/// This type is produced by [`ExecutorParams::prep_hash`]. +#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, PartialOrd, Ord, TypeInfo)] +pub struct ExecutorParamsPrepHash(Hash); + +impl sp_std::fmt::Display for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + self.0.fmt(f) + } +} + +impl sp_std::fmt::Debug for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl sp_std::fmt::LowerHex for ExecutorParamsPrepHash { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + sp_std::fmt::LowerHex::fmt(&self.0, f) + } +} + /// # Deterministically serialized execution environment semantics /// Represents an arbitrary semantics of an arbitrary execution environment, so should be kept as /// abstract as possible. +// // ADR: For mandatory entries, mandatoriness should be enforced in code rather than separating them // into individual fields of the structure. Thus, complex migrations shall be avoided when adding // new entries and removing old ones. At the moment, there's no mandatory parameters defined. If // they show up, they must be clearly documented as mandatory ones. +// +// !!! Any new parameter that does not affect the prepared artifact must be added to the exclusion +// !!! list in `prep_hash()` to avoid unneccessary artifact rebuilds. #[derive( Clone, Debug, Default, Encode, Decode, PartialEq, Eq, TypeInfo, Serialize, Deserialize, )] @@ -175,6 +204,28 @@ impl ExecutorParams { ExecutorParamsHash(BlakeTwo256::hash(&self.encode())) } + /// Returns hash of preparation-related executor parameters + pub fn prep_hash(&self) -> ExecutorParamsPrepHash { + use ExecutorParam::*; + + let mut enc = b"prep".to_vec(); + + self.0 + .iter() + .flat_map(|param| match param { + MaxMemoryPages(..) => None, + StackLogicalMax(..) => Some(param), + StackNativeMax(..) => None, + PrecheckingMaxMemory(..) => None, + PvfPrepTimeout(..) => Some(param), + PvfExecTimeout(..) 
=> None, + WasmExtBulkMemory => Some(param), + }) + .for_each(|p| enc.extend(p.encode())); + + ExecutorParamsPrepHash(BlakeTwo256::hash(&enc)) + } + /// Returns a PVF preparation timeout, if any pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option { for param in &self.0 { @@ -336,3 +387,51 @@ impl From<&[ExecutorParam]> for ExecutorParams { ExecutorParams(arr.to_vec()) } } + +// This test ensures the hash generated by `prep_hash()` changes if any preparation-related +// executor parameter changes. If you're adding a new executor parameter, you must add it into +// this test, and if changing that parameter may not affect the artifact produced on the +// preparation step, it must be added to the list of exlusions in `pre_hash()` as well. +// See also `prep_hash()` comments. +#[test] +fn ensure_prep_hash_changes() { + use ExecutorParam::*; + let ep = ExecutorParams::from( + &[ + MaxMemoryPages(0), + StackLogicalMax(0), + StackNativeMax(0), + PrecheckingMaxMemory(0), + PvfPrepTimeout(PvfPrepKind::Precheck, 0), + PvfPrepTimeout(PvfPrepKind::Prepare, 0), + PvfExecTimeout(PvfExecKind::Backing, 0), + PvfExecTimeout(PvfExecKind::Approval, 0), + WasmExtBulkMemory, + ][..], + ); + + for p in ep.iter() { + let (ep1, ep2) = match p { + MaxMemoryPages(_) => continue, + StackLogicalMax(_) => ( + ExecutorParams::from(&[StackLogicalMax(1)][..]), + ExecutorParams::from(&[StackLogicalMax(2)][..]), + ), + StackNativeMax(_) => continue, + PrecheckingMaxMemory(_) => continue, + PvfPrepTimeout(PvfPrepKind::Precheck, _) => ( + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 1)][..]), + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Precheck, 2)][..]), + ), + PvfPrepTimeout(PvfPrepKind::Prepare, _) => ( + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 1)][..]), + ExecutorParams::from(&[PvfPrepTimeout(PvfPrepKind::Prepare, 2)][..]), + ), + PvfExecTimeout(_, _) => continue, + WasmExtBulkMemory => + (ExecutorParams::default(), ExecutorParams::from(&[WasmExtBulkMemory][..])), + }; + + assert_ne!(ep1.prep_hash(), ep2.prep_hash()); + } +} diff --git a/polkadot/primitives/src/v7/mod.rs b/polkadot/primitives/src/v7/mod.rs index 5647bfe68d56..8a059408496c 100644 --- a/polkadot/primitives/src/v7/mod.rs +++ b/polkadot/primitives/src/v7/mod.rs @@ -62,7 +62,9 @@ pub mod executor_params; pub mod slashing; pub use async_backing::AsyncBackingParams; -pub use executor_params::{ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash}; +pub use executor_params::{ + ExecutorParam, ExecutorParamError, ExecutorParams, ExecutorParamsHash, ExecutorParamsPrepHash, +}; mod metrics; pub use metrics::{ diff --git a/prdoc/pr_4211.prdoc b/prdoc/pr_4211.prdoc new file mode 100644 index 000000000000..161dc8485e83 --- /dev/null +++ b/prdoc/pr_4211.prdoc @@ -0,0 +1,15 @@ +title: "Re-prepare PVF artifacts only if needed" + +doc: + - audience: Node Dev + description: | + When a change in the executor environment parameters can not affect the prepared artifact, + it is preserved without recompilation and used for future executions. That mitigates + situations where every unrelated executor parameter change resulted in re-preparing every + artifact on every validator, causing a significant finality lag. 
+ +crates: + - name: polkadot-node-core-pvf + bump: minor + - name: polkadot-primitives + bump: minor From ff2b178206f9952c3337638659450c67fd700e7e Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 25 Apr 2024 22:01:05 +1000 Subject: [PATCH 58/74] remote-externalities: retry get child keys query (#4280) --- .../frame/remote-externalities/src/lib.rs | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index e429d39669f1..58cb901470c1 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -830,19 +830,22 @@ where child_prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - // This is deprecated and will generate a warning which causes the CI to fail. - #[allow(warnings)] - let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( - client, - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix, - Some(at), - ) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); + let get_child_keys_closure = || { + #[allow(deprecated)] + substrate_rpc_client::ChildStateApi::storage_keys( + client, + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_prefix.clone(), + Some(at), + ) + }; + let child_keys = + Retry::spawn(retry_strategy, get_child_keys_closure).await.map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; debug!( target: LOG_TARGET, From c9923cd7feb9e7c6337f0942abd3279468df5559 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Thu, 25 Apr 2024 16:52:24 +0300 Subject: [PATCH 59/74] rename fragment_tree folder to fragment_chain (#4294) Makes https://github.com/paritytech/polkadot-sdk/pull/4035 easier to review --- .../src/{fragment_tree => fragment_chain}/mod.rs | 0 .../src/{fragment_tree => fragment_chain}/tests.rs | 0 polkadot/node/core/prospective-parachains/src/lib.rs | 12 ++++++------ 3 files changed, 6 insertions(+), 6 deletions(-) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/mod.rs (100%) rename polkadot/node/core/prospective-parachains/src/{fragment_tree => fragment_chain}/tests.rs (100%) diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/mod.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs diff --git a/polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs similarity index 100% rename from polkadot/node/core/prospective-parachains/src/fragment_tree/tests.rs rename to polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index f5d50fb74fac..0b1a2e034a28 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -55,13 +55,13 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - fragment_tree::{ + 
fragment_chain::{ CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, }, }; mod error; -mod fragment_tree; +mod fragment_chain; #[cfg(test)] mod tests; @@ -349,7 +349,7 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - compact: crate::fragment_tree::PendingAvailability, + compact: crate::fragment_chain::PendingAvailability, } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -394,7 +394,7 @@ async fn preprocess_candidates_pending_availability( relay_parent_number: relay_parent.number, relay_parent_storage_root: relay_parent.storage_root, }, - compact: crate::fragment_tree::PendingAvailability { + compact: crate::fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, relay_parent, }, @@ -675,7 +675,7 @@ fn answer_hypothetical_frontier_request( let candidate_hash = c.candidate_hash(); let hypothetical = match c { HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => - fragment_tree::HypotheticalCandidate::Complete { + fragment_chain::HypotheticalCandidate::Complete { receipt: Cow::Borrowed(receipt), persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, @@ -683,7 +683,7 @@ fn answer_hypothetical_frontier_request( parent_head_data_hash, candidate_relay_parent, .. - } => fragment_tree::HypotheticalCandidate::Incomplete { + } => fragment_chain::HypotheticalCandidate::Incomplete { relay_parent: *candidate_relay_parent, parent_head_data_hash: *parent_head_data_hash, }, From 8f5c8f735af9048b83957821db7fb363e89e919f Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:04:20 +0200 Subject: [PATCH 60/74] Update approval-voting banchmarks base values (#4283) --- .../benches/approval-voting-regression-bench.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs index 7157362a79c7..9a5f0d29dbd3 100644 --- a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs +++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs @@ -77,12 +77,12 @@ fn main() -> Result<(), String> { // We expect no variance for received and sent // but use 0.001 because we operate with floats messages.extend(average_usage.check_network_usage(&[ - ("Received from peers", 52944.7000, 0.001), - ("Sent to peers", 63532.2000, 0.001), + ("Received from peers", 52942.4600, 0.001), + ("Sent to peers", 63547.0330, 0.001), ])); messages.extend(average_usage.check_cpu_usage(&[ - ("approval-distribution", 7.7883, 0.1), - ("approval-voting", 10.4655, 0.1), + ("approval-distribution", 7.0317, 0.1), + ("approval-voting", 9.5751, 0.1), ])); if messages.is_empty() { From dd5b06e622c6c5c301a1554286ec1f4995c7daca Mon Sep 17 00:00:00 2001 From: Andrei Eres Date: Thu, 25 Apr 2024 17:06:37 +0200 Subject: [PATCH 61/74] [subsystem-benchmarks] Log standart deviation for subsystem-benchmarks (#4285) Should help us to understand more what's happening between individual runs and possibly adjust the number of runs --- polkadot/node/subsystem-bench/src/lib/usage.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/polkadot/node/subsystem-bench/src/lib/usage.rs b/polkadot/node/subsystem-bench/src/lib/usage.rs index 
59296746ec3d..bfaac3265a2e 100644
--- a/polkadot/node/subsystem-bench/src/lib/usage.rs
+++ b/polkadot/node/subsystem-bench/src/lib/usage.rs
@@ -161,6 +161,13 @@ impl ResourceUsage {
 		for (resource_name, values) in by_name {
 			let total = values.iter().map(|v| v.total).sum::<f64>() / values.len() as f64;
 			let per_block = values.iter().map(|v| v.per_block).sum::<f64>() / values.len() as f64;
+			let per_block_sd =
+				standard_deviation(&values.iter().map(|v| v.per_block).collect::<Vec<f64>>());
+			println!(
+				"[{}] standard_deviation {:.2}%",
+				resource_name,
+				per_block_sd / per_block * 100.0
+			);
 			average.push(Self { resource_name, total, per_block });
 		}
 		average
@@ -179,3 +186,11 @@ pub struct ChartItem {
 	pub unit: String,
 	pub value: f64,
 }
+
+fn standard_deviation(values: &[f64]) -> f64 {
+	let n = values.len() as f64;
+	let mean = values.iter().sum::<f64>() / n;
+	let variance = values.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / (n - 1.0);
+
+	variance.sqrt()
+}

From 8f8c49deffe56567ba5cde0e1047de15b660bb0e Mon Sep 17 00:00:00 2001
From: Noah Jelich <12912633+njelich@users.noreply.github.com>
Date: Fri, 26 Apr 2024 09:03:53 +0200
Subject: [PATCH 62/74] Fix bad links (#4231)

The solochain template links to the parachain template instead of the
solochain one.

---
 templates/solochain/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/templates/solochain/README.md b/templates/solochain/README.md
index 6390c9524ce1..37c65797dcb0 100644
--- a/templates/solochain/README.md
+++ b/templates/solochain/README.md
@@ -4,10 +4,10 @@ A fresh [Substrate](https://substrate.io/) node, ready for hacking :rocket:
 
 A standalone version of this template is available for each release of Polkadot
 in the [Substrate Developer Hub Parachain
-Template](https://github.com/substrate-developer-hub/substrate-parachain-template/)
+Template](https://github.com/substrate-developer-hub/substrate-node-template/)
 repository. The parachain template is generated directly at each Polkadot
-release branch from the [Node Template in
-Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template)
+release branch from the [Solochain Template in
+Substrate](https://github.com/paritytech/polkadot-sdk/tree/master/templates/solochain)
 upstream
 
 It is usually best to use the stand-alone version to start a new
 project. All

From e8f7c81db66abb40802c582c22041aa63c78ddff Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Fri, 26 Apr 2024 11:16:03 +0300
Subject: [PATCH 63/74] [balances] Safeguard against consumer ref underflow
 (#3865)

There are some accounts that do not have a consumer ref while having a
reserve. This adds a fail-safe mechanism to trigger in the case that
`does_consume` is true, but the assumption of `consumer > 0` is not.

This should prevent those accounts from losing balance and the TI from
getting messed up even more, but it is not an "ideal" fix. TBH an ideal
fix is not possible, since the on-chain data is in an invalid state.
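The failsafe itself reduces to a one-line guard. Condensed into a standalone sketch, with a hypothetical `Account` type standing in for the real `frame_system` account storage, the invariant and its defensive repair look like this:

```
// Hypothetical, simplified account record; the real code reads these values
// through `frame_system::Pallet::<T>`.
struct Account {
	reserved: u64,
	consumers: u32,
}

impl Account {
	// A reserve must be backed by at least one consumer reference, otherwise
	// the account could be reaped while funds are still held.
	fn does_consume(&self) -> bool {
		self.reserved > 0
	}

	// The failsafe: if the account should hold a consumer ref but the counter
	// already sits at zero (a historic, invalid state), defensively restore it.
	fn ensure_consumer_ref(&mut self) {
		if self.does_consume() && self.consumers == 0 {
			eprintln!("Defensively bumping a consumer ref.");
			self.consumers += 1;
		}
	}
}

fn main() {
	// A "historic" account in the bad state: reserved > 0 but consumers == 0.
	let mut acc = Account { reserved: 10, consumers: 0 };
	acc.ensure_consumer_ref();
	assert_eq!(acc.consumers, 1); // invariant restored; the account cannot reap
}
```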
---------

Signed-off-by: Oliver Tale-Yazdi
---
 prdoc/pr_3865.prdoc                           |  11 ++
 substrate/frame/balances/Cargo.toml           |   1 +
 substrate/frame/balances/src/lib.rs           |   7 ++
 .../frame/balances/src/tests/general_tests.rs | 111 ++++++++++++++++++
 substrate/frame/balances/src/tests/mod.rs     |  20 +++-
 substrate/frame/balances/src/types.rs         |   2 +-
 6 files changed, 150 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_3865.prdoc
 create mode 100644 substrate/frame/balances/src/tests/general_tests.rs

diff --git a/prdoc/pr_3865.prdoc b/prdoc/pr_3865.prdoc
new file mode 100644
index 000000000000..8e39c04825b1
--- /dev/null
+++ b/prdoc/pr_3865.prdoc
@@ -0,0 +1,11 @@
+title: "Balances: add failsafe for consumer ref underflow"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Pallet balances now handles the case that historic accounts violate an invariant that they should have a consumer ref on `reserved > 0` balance.
+      This disallows such accounts from reaping and should prevent TI from getting messed up even more.
+
+crates:
+  - name: pallet-balances
+    bump: patch
diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml
index 28eabdaf5062..1cc9ac5d8fd2 100644
--- a/substrate/frame/balances/Cargo.toml
+++ b/substrate/frame/balances/Cargo.toml
@@ -28,6 +28,7 @@ docify = "0.2.8"
 
 [dev-dependencies]
 pallet-transaction-payment = { path = "../transaction-payment" }
+frame-support = { path = "../support", features = ["experimental"] }
 sp-core = { path = "../../primitives/core" }
 sp-io = { path = "../../primitives/io" }
 paste = "1.0.12"
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs
index 685b12499ac0..bd811955d63c 100644
--- a/substrate/frame/balances/src/lib.rs
+++ b/substrate/frame/balances/src/lib.rs
@@ -954,6 +954,13 @@ pub mod pallet {
 			if !did_consume && does_consume {
 				frame_system::Pallet::<T>::inc_consumers(who)?;
 			}
+			if does_consume && frame_system::Pallet::<T>::consumers(who) == 0 {
+				// NOTE: This is a failsafe and should not happen for normal accounts. A normal
+				// account should have gotten a consumer ref in `!did_consume && does_consume`
+				// at some point.
+				log::error!(target: LOG_TARGET, "Defensively bumping a consumer ref.");
+				frame_system::Pallet::<T>::inc_consumers(who)?;
+			}
 			if did_provide && !does_provide {
 				// This could reap the account so must go last.
 				frame_system::Pallet::<T>::dec_providers(who).map_err(|r| {
diff --git a/substrate/frame/balances/src/tests/general_tests.rs b/substrate/frame/balances/src/tests/general_tests.rs
new file mode 100644
index 000000000000..0f3e015d0a89
--- /dev/null
+++ b/substrate/frame/balances/src/tests/general_tests.rs
@@ -0,0 +1,111 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#![cfg(test)] + +use crate::{ + system::AccountInfo, + tests::{ensure_ti_valid, Balances, ExtBuilder, System, Test, TestId, UseSystem}, + AccountData, ExtraFlags, TotalIssuance, +}; +use frame_support::{ + assert_noop, assert_ok, hypothetically, + traits::{ + fungible::{Mutate, MutateHold}, + tokens::Precision, + }, +}; +use sp_runtime::DispatchError; + +/// There are some accounts that have one consumer ref too few. These accounts are at risk of losing +/// their held (reserved) balance. They do not just lose it - it is also not accounted for in the +/// Total Issuance. Here we test the case that the account does not reap in such a case, but gets +/// one consumer ref for its reserved balance. +#[test] +fn regression_historic_acc_does_not_evaporate_reserve() { + ExtBuilder::default().build_and_execute_with(|| { + UseSystem::set(true); + let (alice, bob) = (0, 1); + // Alice is in a bad state with consumer == 0 && reserved > 0: + Balances::set_balance(&alice, 100); + TotalIssuance::::put(100); + ensure_ti_valid(); + + assert_ok!(Balances::hold(&TestId::Foo, &alice, 10)); + // This is the issue of the account: + System::dec_consumers(&alice); + + assert_eq!( + System::account(&alice), + AccountInfo { + data: AccountData { + free: 90, + reserved: 10, + frozen: 0, + flags: ExtraFlags(1u128 << 127), + }, + nonce: 0, + consumers: 0, // should be 1 on a good acc + providers: 1, + sufficients: 0, + } + ); + + ensure_ti_valid(); + + // Reaping the account is prevented by the new logic: + assert_noop!( + Balances::transfer_allow_death(Some(alice).into(), bob, 90), + DispatchError::ConsumerRemaining + ); + assert_noop!( + Balances::transfer_all(Some(alice).into(), bob, false), + DispatchError::ConsumerRemaining + ); + + // normal transfers still work: + hypothetically!({ + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + hypothetically!({ + assert_ok!(Balances::transfer_all(Some(alice).into(), bob, true)); + // Alice got back her consumer ref: + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + + // un-reserving all does not add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 10, Precision::Exact)); + assert_eq!(System::consumers(&alice), 0); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 0); + ensure_ti_valid(); + }); + // un-reserving some does add a consumer ref: + hypothetically!({ + assert_ok!(Balances::release(&TestId::Foo, &alice, 5, Precision::Exact)); + assert_eq!(System::consumers(&alice), 1); + assert_ok!(Balances::transfer_keep_alive(Some(alice).into(), bob, 40)); + assert_eq!(System::consumers(&alice), 1); + ensure_ti_valid(); + }); + }); +} diff --git a/substrate/frame/balances/src/tests/mod.rs b/substrate/frame/balances/src/tests/mod.rs index 234fe6eaf2c3..0abf2251290f 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; +use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet, TotalIssuance}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl, @@ -47,6 +47,7 @@ mod currency_tests; mod dispatchable_tests; mod fungible_conformance_tests; mod 
fungible_tests; +mod general_tests; mod reentrancy_tests; type Block = frame_system::mocking::MockBlock; @@ -278,6 +279,23 @@ pub fn info_from_weight(w: Weight) -> DispatchInfo { DispatchInfo { weight: w, ..Default::default() } } +/// Check that the total-issuance matches the sum of all accounts' total balances. +pub fn ensure_ti_valid() { + let mut sum = 0; + + for acc in frame_system::Account::::iter_keys() { + if UseSystem::get() { + let data = frame_system::Pallet::::account(acc); + sum += data.data.total(); + } else { + let data = crate::Account::::get(acc); + sum += data.total(); + } + } + + assert_eq!(TotalIssuance::::get(), sum, "Total Issuance wrong"); +} + #[test] fn weights_sane() { let info = crate::Call::::transfer_allow_death { dest: 10, value: 4 }.get_dispatch_info(); diff --git a/substrate/frame/balances/src/types.rs b/substrate/frame/balances/src/types.rs index 69d33bb023f3..3e36a83575c8 100644 --- a/substrate/frame/balances/src/types.rs +++ b/substrate/frame/balances/src/types.rs @@ -111,7 +111,7 @@ pub struct AccountData { const IS_NEW_LOGIC: u128 = 0x80000000_00000000_00000000_00000000u128; #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)] -pub struct ExtraFlags(u128); +pub struct ExtraFlags(pub(crate) u128); impl Default for ExtraFlags { fn default() -> Self { Self(IS_NEW_LOGIC) From c66d8a84687f5d68c0192122aa513b4b340794ca Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 12:24:42 +0300 Subject: [PATCH 64/74] Bump bridges relay version + uncomment bridges zombeinet tests (#4289) TODOs: - [x] wait and see if test `1` works; - [x] ~think of whether we need remaining tests.~ I think we should keep it - will try to revive and update it --- .gitlab/pipeline/zombienet.yml | 4 +--- .gitlab/pipeline/zombienet/bridges.yml | 4 ++-- ...hen-idle.js => multiple-headers-synced.js} | 22 +++++-------------- .../rococo-to-westend.zndsl | 20 +++++++++++++++++ .../run.sh | 2 +- .../westend-to-rococo.zndsl | 20 +++++++++++++++++ .../rococo-to-westend.zndsl | 8 ------- .../westend-to-rococo.zndsl | 7 ------ ...ridges_zombienet_tests_injected.Dockerfile | 2 +- 9 files changed, 51 insertions(+), 38 deletions(-) rename bridges/testing/framework/js-helpers/{only-mandatory-headers-synced-when-idle.js => multiple-headers-synced.js} (61%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl rename bridges/testing/tests/{0002-mandatory-headers-synced-while-idle => 0002-free-headers-synced-while-idle}/run.sh (90%) create mode 100644 bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl delete mode 100644 bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index e306cb43c027..52948e1eb719 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -12,6 +12,4 @@ include: # polkadot tests - .gitlab/pipeline/zombienet/polkadot.yml # bridges tests - # TODO: https://github.com/paritytech/parity-bridges-common/pull/2884 - # commenting until we have a new relatye, compatible with updated fees scheme - # - .gitlab/pipeline/zombienet/bridges.yml + - .gitlab/pipeline/zombienet/bridges.yml diff --git a/.gitlab/pipeline/zombienet/bridges.yml b/.gitlab/pipeline/zombienet/bridges.yml index 4278f59b1e9a..9d7a8b931193 100644 --- 
a/.gitlab/pipeline/zombienet/bridges.yml +++ b/.gitlab/pipeline/zombienet/bridges.yml @@ -55,9 +55,9 @@ zombienet-bridges-0001-asset-transfer-works: - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0001-asset-transfer --docker - echo "Done" -zombienet-bridges-0002-mandatory-headers-synced-while-idle: +zombienet-bridges-0002-free-headers-synced-while-idle: extends: - .zombienet-bridges-common script: - - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-mandatory-headers-synced-while-idle --docker + - /home/nonroot/bridges-polkadot-sdk/bridges/testing/run-new-test.sh 0002-free-headers-synced-while-idle --docker - echo "Done" diff --git a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js b/bridges/testing/framework/js-helpers/multiple-headers-synced.js similarity index 61% rename from bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js rename to bridges/testing/framework/js-helpers/multiple-headers-synced.js index 979179245ebe..a30efc821657 100644 --- a/bridges/testing/framework/js-helpers/only-mandatory-headers-synced-when-idle.js +++ b/bridges/testing/framework/js-helpers/multiple-headers-synced.js @@ -10,33 +10,23 @@ async function run(nodeName, networkInfo, args) { // start listening to new blocks let totalGrandpaHeaders = 0; - let initialParachainHeaderImported = false; + let totalParachainHeaders = 0; api.rpc.chain.subscribeNewHeads(async function (header) { const apiAtParent = await api.at(header.parentHash); const apiAtCurrent = await api.at(header.hash); const currentEvents = await apiAtCurrent.query.system.events(); - totalGrandpaHeaders += await utils.ensureOnlyMandatoryGrandpaHeadersImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - initialParachainHeaderImported = await utils.ensureOnlyInitialParachainHeaderImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); + totalGrandpaHeaders += await utils.countGrandpaHeaderImports(bridgedChain, currentEvents); + totalParachainHeaders += await utils.countParachainHeaderImports(bridgedChain, currentEvents); }); // wait given time await new Promise(resolve => setTimeout(resolve, exitAfterSeconds * 1000)); - // if we haven't seen any new GRANDPA or parachain headers => fail - if (totalGrandpaHeaders == 0) { + // if we haven't seen many (>1) new GRANDPA or parachain headers => fail + if (totalGrandpaHeaders <= 1) { throw new Error("No bridged relay chain headers imported"); } - if (!initialParachainHeaderImported) { + if (totalParachainHeaders <= 1) { throw new Error("No bridged parachain headers imported"); } } diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl new file mode 100644 index 000000000000..0f779caa87cd --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/rococo-to-westend.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Rococo (and a single Rococo BH) headers to Westend BH. 
+Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds + +# ensure that we have synced multiple relay and parachain headers while idle. This includes both +# headers that were generated while relay was offline and those in the next 100 seconds while script is active. +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,rococo-at-westend" within 600 seconds + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh similarity index 90% rename from bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh rename to bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh index 32419dc84f59..9d19a9688f94 100755 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/run.sh +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/run.sh @@ -22,7 +22,7 @@ echo # which is expected to be 60 seconds for the test environment. echo -e "Sleeping 90s before starting relayer ...\n" sleep 90 -${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir relayer_pid +${BASH_SOURCE%/*}/../../environments/rococo-westend/start_relayer.sh $rococo_dir $westend_dir finality_relayer_pid parachains_relayer_pid messages_relayer_pid run_zndsl ${BASH_SOURCE%/*}/rococo-to-westend.zndsl $westend_dir run_zndsl ${BASH_SOURCE%/*}/westend-to-rococo.zndsl $rococo_dir diff --git a/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl new file mode 100644 index 000000000000..7a6f1ec379d2 --- /dev/null +++ b/bridges/testing/tests/0002-free-headers-synced-while-idle/westend-to-rococo.zndsl @@ -0,0 +1,20 @@ +Description: While relayer is idle, we only sync free Westend (and a single Westend BH) headers to Rococo BH. 
+Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml +Creds: config + +# local chain spec gives `1u64 << 60` tokens to every endowed account: if it'll ever +# change, it'd need to be fixed here as well + +# //Charlie has inital balance +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave has inital balance +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds + +# ensure that we have synced multiple relay and parachain headers while idle. This includes both +# headers that were generated while relay was offline and those in the next 100 seconds while script is active. +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/multiple-headers-synced.js with "300,westend-at-rococo" within 600 seconds + +# //Charlie only submits free and mandatory relay chain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y" return is 1152921504606846976 within 30 seconds +# //Dave only submits free parachain headers, so the balance should stay the same +bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/native-asset-balance.js with "5DAAnrj7VHTznn2AWBemMuyBwZWs6FNFjdyVXUeYum3PTXFy" return is 1152921504606846976 within 30 seconds diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl deleted file mode 100644 index 6e381f537732..000000000000 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/rococo-to-westend.zndsl +++ /dev/null @@ -1,8 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH. -Network: {{ENV_PATH}}/bridge_hub_westend_local_network.toml -Creds: config - -# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# generated while relay was offline and those in the next 100 seconds while script is active. -bridge-hub-westend-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds - diff --git a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl b/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl deleted file mode 100644 index b4b3e4367916..000000000000 --- a/bridges/testing/tests/0002-mandatory-headers-synced-while-idle/westend-to-rococo.zndsl +++ /dev/null @@ -1,7 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH. -Network: {{ENV_PATH}}/bridge_hub_rococo_local_network.toml -Creds: config - -# ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# generated while relay was offline and those in the next 100 seconds while script is active. 
-bridge-hub-rococo-collator1: js-script {{FRAMEWORK_PATH}}/js-helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds
diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
index 938f5cc45a11..196ba861f503 100644
--- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
+++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
@@ -1,7 +1,7 @@
 # this image is built on top of existing Zombienet image
 ARG ZOMBIENET_IMAGE
 # this image uses substrate-relay image built elsewhere
-ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.2.1
+ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.5.0
 # metadata
 ARG VCS_REF

From d212fc7a41fc72299913737c5fea2f3fcfe0a253 Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Fri, 26 Apr 2024 13:24:03 +0200
Subject: [PATCH 65/74] review-bot: reverted #4271 and added
 `workflow_dispatch` (#4293)

This PR includes two changes:
- added `workflow_dispatch` to review bot
- reverted #4271

### Added `workflow_dispatch` to review bot

This allows us, in the case that review-bot fails for some fork-related
reason, to trigger it manually, ensuring that we can overcome the problem
with the multiple actions while we look for a solution.

### Reverted #4271

Unfortunately, the changes added in #4271 do not work in forks.

Here is a lengthy discussion of many individuals facing the same problem
as me:
- [GitHub Action `pull_request` attribute empty in `workflow_run` event
  object for PR from forked repo
  #25220](https://github.com/orgs/community/discussions/25220)

So I had to revert it (but I updated the dependencies to latest).

#### Miscellaneous changes

I added a debug log at the end of review bot in case it fails, so we can
easily debug it without having to write a lot of boilerplate and fork
repos to duplicate the environment.
--- .github/workflows/review-bot.yml | 19 ++++++++++++++++++- .github/workflows/review-trigger.yml | 13 +++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index fb877357b232..f1401406ae47 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -5,6 +5,12 @@ on: - Review-Trigger types: - completed + workflow_dispatch: + inputs: + pr-number: + description: "Number of the PR to evaluate" + required: true + type: number jobs: review-approvals: @@ -17,6 +23,12 @@ jobs: with: app-id: ${{ secrets.REVIEW_APP_ID }} private-key: ${{ secrets.REVIEW_APP_KEY }} + - name: Extract content of artifact + if: ${{ !inputs.pr-number }} + id: number + uses: Bullrich/extract-text-from-artifact@v1.0.1 + with: + artifact-name: pr_number - name: "Evaluates PR reviews and assigns reviewers" uses: paritytech/review-bot@v2.4.0 with: @@ -24,5 +36,10 @@ jobs: team-token: ${{ steps.app_token.outputs.token }} checks-token: ${{ steps.app_token.outputs.token }} # This is extracted from the triggering event - pr-number: ${{ github.event.workflow_run.pull_requests[0].number }} + pr-number: ${{ inputs.pr-number || steps.number.outputs.content }} request-reviewers: true + - name: Log payload + if: ${{ failure() || runner.debug }} + run: echo "::debug::$payload" + env: + payload: ${{ toJson(github.event) }} diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 6437be161d34..ec4a62afc0c7 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -58,3 +58,16 @@ jobs: env: GH_TOKEN: ${{ github.token }} COMMENTS: ${{ steps.comments.outputs.users }} + - name: Get PR number + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + echo "Saving PR number: $PR_NUMBER" + mkdir -p ./pr + echo $PR_NUMBER > ./pr/pr_number + - uses: actions/upload-artifact@v4 + name: Save PR number + with: + name: pr_number + path: pr/ + retention-days: 5 From 9a48cd707ed7f4034aadb8dc05065080ad102037 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 26 Apr 2024 14:26:05 +0300 Subject: [PATCH 66/74] Bridges: added helper function to relay single GRANDPA proof + header (#4307) related to https://github.com/paritytech/parity-bridges-common/issues/2962 silent, because the actual code for subcommand is added in the `parity-bridges-common` repo, where binary lives --------- Co-authored-by: Adrian Catangiu --- .../src/cli/relay_headers.rs | 39 ++++++++++++++++++- .../lib-substrate-relay/src/finality/mod.rs | 37 +++++++++++++++++- 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index cf1957c7323b..093f98ef21ed 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -19,7 +19,10 @@ use async_trait::async_trait; use structopt::StructOpt; -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; +use relay_utils::{ + metrics::{GlobalMetrics, StandaloneMetric}, + UniqueSaturatedInto, +}; use crate::{ cli::{bridge::*, chain_schema::*, PrometheusParams}, @@ -48,6 +51,21 @@ pub struct RelayHeadersParams { prometheus_params: PrometheusParams, } +/// Single header relaying params. 
+#[derive(StructOpt)] +pub struct RelayHeaderParams { + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + /// Number of the source chain header that we want to relay. It must have a persistent + /// storage proof at the [`Self::source`] node, otherwise the command will fail. + #[structopt(long)] + number: u128, +} + impl RelayHeadersParams { fn headers_to_relay(&self) -> HeadersToRelay { match (self.only_mandatory_headers, self.only_free_headers) { @@ -89,4 +107,23 @@ pub trait HeadersRelayer: RelayToRelayHeadersCliBridge { ) .await } + + /// Relay single header. No checks are made to ensure that transaction will succeed. + async fn relay_header(data: RelayHeaderParams) -> anyhow::Result<()> { + let source_client = data.source.into_client::().await?; + let target_client = data.target.into_client::().await?; + let target_transactions_mortality = data.target_sign.target_transactions_mortality; + let target_sign = data.target_sign.to_keypair::()?; + + crate::finality::relay_single_header::( + source_client, + target_client, + crate::TransactionParams { + signer: target_sign, + mortality: target_transactions_mortality, + }, + data.number.unique_saturated_into(), + ) + .await + } } diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index a06857ae1d9b..0293e1da224a 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -25,13 +25,15 @@ use crate::{ use async_trait::async_trait; use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext}; -use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay}; +use finality_relay::{ + FinalityPipeline, FinalitySyncPipeline, HeadersToRelay, SourceClient, TargetClient, +}; use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; use relay_substrate_client::{ transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, ChainWithTransactions, Client, HashOf, HeaderOf, SyncHeader, }; -use relay_utils::metrics::MetricsParams; +use relay_utils::{metrics::MetricsParams, TrackedTransactionStatus, TransactionTracker}; use sp_core::Pair; use std::{fmt::Debug, marker::PhantomData}; @@ -274,3 +276,34 @@ pub async fn run( .await .map_err(|e| anyhow::format_err!("{}", e)) } + +/// Relay single header. No checks are made to ensure that transaction will succeed. +pub async fn relay_single_header( + source_client: Client, + target_client: Client, + transaction_params: TransactionParams>, + header_number: BlockNumberOf, +) -> anyhow::Result<()> { + let finality_source = SubstrateFinalitySource::
<P>
::new(source_client, None); + let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; + let Some(proof) = proof else { + return Err(anyhow::format_err!( + "Unable to submit {} header #{} to {}: no finality proof", + P::SourceChain::NAME, + header_number, + P::TargetChain::NAME, + )); + }; + + let finality_target = SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params); + let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; + match tx_tracker.wait().await { + TrackedTransactionStatus::Finalized(_) => Ok(()), + TrackedTransactionStatus::Lost => Err(anyhow::format_err!( + "Transaction with {} header #{} is considered lost at {}", + P::SourceChain::NAME, + header_number, + P::TargetChain::NAME, + )), + } +}

From 97f74253387ee43e30c25fd970b5ae4cc1a722d7 Mon Sep 17 00:00:00 2001 From: gui Date: Fri, 26 Apr 2024 21:27:14 +0900 Subject: [PATCH 67/74] Try state: log errors instead of logging the number of errors and discarding them (#4265)

Currently we discard the errors' content. We should at least log it. Code now is more similar to what is written in try_on_runtime_upgrade. label should be R0 --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Liam Aharon Co-authored-by: Javier Bullrich --- .../support/src/traits/try_runtime/mod.rs | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/substrate/frame/support/src/traits/try_runtime/mod.rs b/substrate/frame/support/src/traits/try_runtime/mod.rs index bec2dbf549a1..c1bf1feb19e5 100644 --- a/substrate/frame/support/src/traits/try_runtime/mod.rs +++ b/substrate/frame/support/src/traits/try_runtime/mod.rs @@ -161,22 +161,31 @@ impl TryState Ok(()), Select::All => { - let mut error_count = 0; + let mut errors = Vec::<TryRuntimeError>::new(); + for_tuples!(#( - if let Err(_) = Tuple::try_state(n.clone(), targets.clone()) { - error_count += 1; + if let Err(err) = Tuple::try_state(n.clone(), targets.clone()) { + errors.push(err); } )*); - if error_count > 0 { + if !errors.is_empty() { log::error!( target: "try-runtime", - "{} pallets exited with errors while executing try_state checks.", - error_count + "Detected errors while executing `try_state`:", ); + errors.iter().for_each(|err| { + log::error!( + target: "try-runtime", + "{:?}", + err + ); + }); + return Err( - "Detected errors while executing try_state checks. See logs for more info." + "Detected errors while executing `try_state` checks. See logs for more \ + info." .into(), ) }

From 988e30f102b155ab68d664d62ac5c73da171659a Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Fri, 26 Apr 2024 16:28:08 +0300 Subject: [PATCH 68/74] Implementation of the new validator disabling strategy (#2226) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

Closes https://github.com/paritytech/polkadot-sdk/issues/1966, https://github.com/paritytech/polkadot-sdk/issues/1963 and https://github.com/paritytech/polkadot-sdk/issues/1962.

Disabling strategy specification [here](https://github.com/paritytech/polkadot-sdk/pull/2955). (Updated 13/02/2024)

Implements: * validator disabling for a whole era instead of just a session * no more than 1/3 of the validators in the active set are disabled

Removes: * `DisableStrategy` enum - now each validator committing an offence is disabled. * New era is not forced if too many validators are disabled.

Before this PR not all offenders were disabled. A decision was made based on [`enum DisableStrategy`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/primitives/staking/src/offence.rs#L54). Some offenders were disabled for a whole era, some just for a session, some were not disabled at all. This PR changes the disabling behaviour. Now a validator committing an offence is disabled immediately until the end of the current era.
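The decision logic becomes a pluggable trait rather than a fixed enum. As a minimal sketch of the extension point (the `DisablingStrategy` trait and its `decision` signature are the ones added by this PR; `NeverDisableStrategy` is an invented example, not shipped code), a runtime that never wants runtime-side disabling could plug in:

```
use pallet_staking::DisablingStrategy;
use sp_staking::EraIndex;

/// Example-only strategy: offences are still reported and slashed, but no
/// validator is ever runtime-disabled.
pub struct NeverDisableStrategy;

impl<T: pallet_staking::Config> DisablingStrategy<T> for NeverDisableStrategy {
	fn decision(
		_offender_stash: &T::AccountId,
		_slash_era: EraIndex,
		_currently_disabled: &Vec<u32>,
	) -> Option<u32> {
		// Returning `None` means "do not disable a new validator for this offence".
		None
	}
}
```

and set `type DisablingStrategy = NeverDisableStrategy;` in its `pallet_staking::Config`.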
Some implementation notes:

* `OffendingValidators` in pallet session keeps all offenders (this is not changed). However its type is changed from `Vec<(u32, bool)>` to `Vec<u32>`. The reason is simple - each offender is now disabled, so the bool doesn't make sense anymore. * When a validator is disabled it is first added to `OffendingValidators` and then to `DisabledValidators`. This is done in [`add_offending_validator`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/slashing.rs#L325) from the staking pallet. * In [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623) the `end_session` also calls [`end_era`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L490) when an era ends. In this case `OffendingValidators` are cleared **(1)**. * Then in [`rotate_session`](https://github.com/paritytech/polkadot-sdk/blob/bdbe98297032e21a553bf191c530690b1d591405/substrate/frame/session/src/lib.rs#L623) `DisabledValidators` are cleared **(2)**. * And finally (still in `rotate_session`) a call to [`start_session`](https://github.com/paritytech/polkadot-sdk/blob/bbb6631641f9adba30c0ee6f4d11023a424dd362/substrate/frame/staking/src/pallet/impls.rs#L430) repopulates the disabled validators **(3)**. * The reason for this complication is that the session pallet knows nothing about eras. To overcome this, on each new session the disabled list is repopulated (points 2 and 3). The staking pallet knows when a new era starts, so with point 1 it ensures that the offenders list is cleared.

--------- Co-authored-by: ordian Co-authored-by: ordian Co-authored-by: Maciej Co-authored-by: Gonçalo Pestana Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: command-bot <> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com>

--- .../parachains/src/disputes/slashing.rs | 11 +- polkadot/runtime/test-runtime/src/lib.rs | 3 +- polkadot/runtime/westend/src/lib.rs | 5 +- .../functional/0010-validator-disabling.toml | 2 +- prdoc/pr_2226.prdoc | 28 + substrate/bin/node/runtime/src/lib.rs | 3 +- substrate/frame/babe/src/mock.rs | 3 +- substrate/frame/beefy/src/mock.rs | 3 +- .../test-staking-e2e/src/lib.rs | 162 +--- .../test-staking-e2e/src/mock.rs | 27 +- substrate/frame/fast-unstake/src/mock.rs | 2 +- substrate/frame/grandpa/src/mock.rs | 3 +- substrate/frame/im-online/src/lib.rs | 6 +- substrate/frame/im-online/src/tests.rs | 3 - .../nomination-pools/benchmarking/src/mock.rs | 2 +- .../nomination-pools/test-staking/src/mock.rs | 2 +- .../frame/offences/benchmarking/src/mock.rs | 2 +- substrate/frame/offences/src/lib.rs | 1 - substrate/frame/offences/src/migration.rs | 9 +- substrate/frame/offences/src/mock.rs | 3 +- substrate/frame/root-offences/src/lib.rs | 4 +- substrate/frame/root-offences/src/mock.rs | 3 +- .../frame/session/benchmarking/src/mock.rs | 2 +- substrate/frame/session/src/lib.rs | 2 +- substrate/frame/staking/CHANGELOG.md | 19 + substrate/frame/staking/src/lib.rs | 76 ++ substrate/frame/staking/src/migrations.rs | 57 +- substrate/frame/staking/src/mock.rs | 22 +- substrate/frame/staking/src/pallet/impls.rs | 33 +- substrate/frame/staking/src/pallet/mod.rs | 35 +- substrate/frame/staking/src/slashing.rs | 80 +- substrate/frame/staking/src/tests.rs | 834 ++++++++++-------- substrate/primitives/staking/src/offence.rs | 32 - 33 files changed,
777 insertions(+), 702 deletions(-) create mode 100644 prdoc/pr_2226.prdoc diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index d0c74e4bc958..a61d0c899836 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -64,7 +64,7 @@ use sp_runtime::{ KeyTypeId, Perbill, }; use sp_session::{GetSessionNumber, GetValidatorCount}; -use sp_staking::offence::{DisableStrategy, Kind, Offence, OffenceError, ReportOffence}; +use sp_staking::offence::{Kind, Offence, OffenceError, ReportOffence}; use sp_std::{ collections::{btree_map::Entry, btree_set::BTreeSet}, prelude::*, @@ -134,15 +134,6 @@ where self.time_slot.clone() } - fn disable_strategy(&self) -> DisableStrategy { - match self.kind { - SlashingOffenceKind::ForInvalid => DisableStrategy::Always, - // in the future we might change it based on number of disputes initiated: - // - SlashingOffenceKind::AgainstValid => DisableStrategy::Never, - } - } - fn slash_fraction(&self, _offenders: u32) -> Perbill { self.slash_fraction } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 514643c0a201..d0f1ff0035fc 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -313,7 +313,6 @@ parameter_types! { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxExposurePageSize: u32 = 64; pub const MaxNominators: u32 = 256; - pub storage OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxAuthorities: u32 = 100_000; pub const OnChainMaxWinners: u32 = u32::MAX; // Unbounded number of election targets and voters. @@ -349,7 +348,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = onchain::OnChainExecution; @@ -364,6 +362,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7924939c79bd..03ecd5c070b2 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -613,7 +613,6 @@ parameter_types! { // this is an unbounded number. We just set it to a reasonably high value, 1 full page // of nominators. 
pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxNominations: u32 = ::LIMIT as u32; pub const MaxControllersInDeprecationBatch: u32 = 751; } @@ -634,7 +633,6 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; @@ -647,6 +645,7 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = runtime_common::StakingBenchmarkingConfig; type EventListeners = NominationPools; type WeightInfo = weights::pallet_staking::WeightInfo; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { @@ -1649,7 +1648,7 @@ pub mod migrations { } /// Unreleased migrations. Add new ones here: - pub type Unreleased = (); + pub type Unreleased = (pallet_staking::migrations::v15::MigrateV14ToV15,); } /// Unchecked extrinsic type as expected by this runtime. diff --git a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml index c9d79c5f8f23..806f34d7f767 100644 --- a/polkadot/zombienet_tests/functional/0010-validator-disabling.toml +++ b/polkadot/zombienet_tests/functional/0010-validator-disabling.toml @@ -21,7 +21,7 @@ requests = { memory = "2G", cpu = "1" } [[relaychain.node_groups]] name = "honest-validator" count = 3 - args = ["-lparachain=debug"] + args = ["-lparachain=debug,runtime::staking=debug"] [[relaychain.node_groups]] image = "{{MALUS_IMAGE}}" diff --git a/prdoc/pr_2226.prdoc b/prdoc/pr_2226.prdoc new file mode 100644 index 000000000000..f03540a50f6c --- /dev/null +++ b/prdoc/pr_2226.prdoc @@ -0,0 +1,28 @@ +title: Validator disabling strategy in runtime + +doc: + - audience: Node Operator + description: | + On each committed offence (no matter slashable or not) the offending validator will be + disabled for a whole era. + - audience: Runtime Dev + description: | + The disabling strategy in staking pallet is no longer hardcoded but abstracted away via + `DisablingStrategy` trait. The trait contains a single function (make_disabling_decision) which + is called for each offence. The function makes a decision if (and which) validators should be + disabled. A default implementation is provided - `UpToLimitDisablingStrategy`. It + will be used on Kusama and Polkadot. In nutshell `UpToLimitDisablingStrategy` + disables offenders up to the configured threshold. Offending validators are not disabled for + offences in previous eras. The threshold is controlled via `DISABLING_LIMIT_FACTOR` (a generic + parameter of `UpToLimitDisablingStrategy`). + +migrations: + db: [] + runtime: + - reference: pallet-staking + description: | + Renames `OffendingValidators` storage item to `DisabledValidators` and changes its type from + `Vec<(u32, bool)>` to `Vec`. + +crates: + - name: pallet-staking \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 43c617023bcb..0caaa8c73226 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -654,7 +654,6 @@ parameter_types! { pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominators: u32 = 64; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub const MaxControllersInDeprecationBatch: u32 = 5900; pub OffchainRepeat: BlockNumber = 5; pub HistoryDepth: u32 = 84; @@ -690,7 +689,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; @@ -703,6 +701,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_fast_unstake::Config for Runtime { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index ec54275278eb..395a86e65288 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -144,7 +144,6 @@ parameter_types! { pub const BondingDuration: EraIndex = 3; pub const SlashDeferDuration: EraIndex = 0; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(16); pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -174,7 +173,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -187,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 1c55adc8de4b..0b87de6bf5d7 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -158,7 +158,6 @@ parameter_types! 
{ pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -188,7 +187,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -201,6 +199,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs index 83083c912094..c00bb66ea130 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/lib.rs @@ -23,7 +23,6 @@ pub(crate) const LOG_TARGET: &str = "tests::e2e-epm"; use frame_support::{assert_err, assert_noop, assert_ok}; use mock::*; use sp_core::Get; -use sp_npos_elections::{to_supports, StakedAssignment}; use sp_runtime::Perbill; use crate::mock::RuntimeOrigin; @@ -127,75 +126,48 @@ fn offchainify_works() { } #[test] -/// Replicates the Kusama incident of 8th Dec 2022 and its resolution through the governance +/// Inspired by the Kusama incident of 8th Dec 2022 and its resolution through the governance /// fallback. /// -/// After enough slashes exceeded the `Staking::OffendingValidatorsThreshold`, the staking pallet -/// set `Forcing::ForceNew`. When a new session starts, staking will start to force a new era and -/// calls ::elect(). If at this point EPM and the staking miners did not -/// have enough time to queue a new solution (snapshot + solution submission), the election request -/// fails. If there is no election fallback mechanism in place, EPM enters in emergency mode. -/// Recovery: Once EPM is in emergency mode, subsequent calls to `elect()` will fail until a new -/// solution is added to EPM's `QueuedSolution` queue. This can be achieved through -/// `Call::set_emergency_election_result` or `Call::governance_fallback` dispatchables. Once a new -/// solution is added to the queue, EPM phase transitions to `Phase::Off` and the election flow -/// restarts. Note that in this test case, the emergency throttling is disabled. -fn enters_emergency_phase_after_forcing_before_elect() { +/// Mass slash of validators shouldn't disable more than 1/3 of them (the byzantine threshold). Also +/// no new era should be forced which could lead to EPM entering emergency mode. 
+fn mass_slash_doesnt_enter_emergency_phase() { let epm_builder = EpmExtBuilder::default().disable_emergency_throttling(); - let (ext, pool_state, _) = ExtBuilder::default().epm(epm_builder).build_offchainify(); - - execute_with(ext, || { - log!( - trace, - "current validators (staking): {:?}", - >::validators() - ); - let session_validators_before = Session::validators(); - - roll_to_epm_off(); - assert!(ElectionProviderMultiPhase::current_phase().is_off()); + let staking_builder = StakingExtBuilder::default().validator_count(7); + let (mut ext, _, _) = ExtBuilder::default() + .epm(epm_builder) + .staking(staking_builder) + .build_offchainify(); + ext.execute_with(|| { assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); - // slashes so that staking goes into `Forcing::ForceNew`. - slash_through_offending_threshold(); - assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::ForceNew); + let active_set_size_before_slash = Session::validators().len(); - advance_session_delayed_solution(pool_state.clone()); - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); - log_current_time(); + // Slash more than 1/3 of the active validators + let mut slashed = slash_half_the_active_set(); - let era_before_delayed_next = Staking::current_era(); - // try to advance 2 eras. - assert!(start_next_active_era_delayed_solution(pool_state.clone()).is_ok()); - assert_eq!(Staking::current_era(), era_before_delayed_next); - assert!(start_next_active_era(pool_state).is_err()); - assert_eq!(Staking::current_era(), era_before_delayed_next); + let active_set_size_after_slash = Session::validators().len(); - // EPM is still in emergency phase. - assert!(ElectionProviderMultiPhase::current_phase().is_emergency()); + // active set should stay the same before and after the slash + assert_eq!(active_set_size_before_slash, active_set_size_after_slash); - // session validator set remains the same. - assert_eq!(Session::validators(), session_validators_before); - - // performs recovery through the set emergency result. - let supports = to_supports(&vec![ - StakedAssignment { who: 21, distribution: vec![(21, 10)] }, - StakedAssignment { who: 31, distribution: vec![(21, 10), (31, 10)] }, - StakedAssignment { who: 41, distribution: vec![(41, 10)] }, - ]); - assert!(ElectionProviderMultiPhase::set_emergency_election_result( - RuntimeOrigin::root(), - supports - ) - .is_ok()); + // Slashed validators are disabled up to a limit + slashed.truncate( + pallet_staking::UpToLimitDisablingStrategy::::disable_limit( + active_set_size_after_slash, + ), + ); - // EPM can now roll to signed phase to proceed with elections. The validator set is the - // expected (ie. set through `set_emergency_election_result`). - roll_to_epm_signed(); - //assert!(ElectionProviderMultiPhase::current_phase().is_signed()); - assert_eq!(Session::validators(), vec![21, 31, 41]); - assert_eq!(Staking::current_era(), era_before_delayed_next.map(|e| e + 1)); + // Find the indices of the disabled validators + let active_set = Session::validators(); + let expected_disabled = slashed + .into_iter() + .map(|d| active_set.iter().position(|a| *a == d).unwrap() as u32) + .collect::>(); + + assert_eq!(pallet_staking::ForceEra::::get(), pallet_staking::Forcing::NotForcing); + assert_eq!(Session::disabled_validators(), expected_disabled); }); } @@ -253,77 +225,7 @@ fn continuous_slashes_below_offending_threshold() { } #[test] -/// Slashed validator sets intentions in the same era of slashing. 
-/// -/// When validators are slashed, they are chilled and removed from the current `VoterList`. Thus, -/// the slashed validator should not be considered in the next validator set. However, if the -/// slashed validator sets its intention to validate again in the same era when it was slashed and -/// chilled, the validator may not be removed from the active validator set across eras, provided -/// it would selected in the subsequent era if there was no slash. Nominators of the slashed -/// validator will also be slashed and chilled, as expected, but the nomination intentions will -/// remain after the validator re-set the intention to be validating again. -/// -/// This behaviour is due to removing implicit chill upon slash -/// . -/// -/// Related to . -fn set_validation_intention_after_chilled() { - use frame_election_provider_support::SortedListProvider; - use pallet_staking::{Event, Forcing, Nominators}; - - let (ext, pool_state, _) = ExtBuilder::default() - .epm(EpmExtBuilder::default()) - .staking(StakingExtBuilder::default()) - .build_offchainify(); - - execute_with(ext, || { - assert_eq!(active_era(), 0); - // validator is part of the validator set. - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominate validator 81. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(21), vec![41])); - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator is slashed. it is removed from the `VoterList` through chilling but in the - // current era, the validator is still part of the active validator set. - add_slash(&41); - assert!(Session::validators().contains(&41)); - assert!(!::VoterList::contains(&41)); - assert_eq!( - staking_events(), - [ - Event::Chilled { stash: 41 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 41, - slash_era: 0, - fraction: Perbill::from_percent(10) - } - ], - ); - - // after the nominator is slashed and chilled, the nominations remain. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - - // validator sets intention to stake again in the same era it was chilled. - assert_ok!(Staking::validate(RuntimeOrigin::signed(41), Default::default())); - - // progress era and check that the slashed validator is still part of the validator - // set. - assert!(start_next_active_era(pool_state).is_ok()); - assert_eq!(active_era(), 1); - assert!(Session::validators().contains(&41)); - assert!(::VoterList::contains(&41)); - - // nominations are still active as before the slash. - assert_eq!(Nominators::::get(21).unwrap().targets, vec![41]); - }) -} - -#[test] -/// Active ledger balance may fall below ED if account chills before unbonding. +/// Active ledger balance may fall below ED if account chills before unbounding. /// /// Unbonding call fails if the remaining ledger's stash balance falls below the existential /// deposit. 
However, if the stash is chilled before unbonding, the ledger's active balance may diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index a727e3bf8162..8f1775a7e595 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -35,7 +35,7 @@ use sp_runtime::{ transaction_validity, BuildStorage, PerU16, Perbill, Percent, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, SessionIndex, }; use sp_std::prelude::*; @@ -236,7 +236,6 @@ parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 2; pub static BondingDuration: sp_staking::EraIndex = 28; pub const SlashDeferDuration: sp_staking::EraIndex = 7; // 1/4 the bonding duration. - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(40); pub HistoryDepth: u32 = 84; } @@ -290,6 +289,8 @@ parameter_types! { /// Upper limit on the number of NPOS nominations. const MAX_QUOTA_NOMINATIONS: u32 = 16; +/// Disabling factor set explicitly to byzantine threshold +pub(crate) const SLASHING_DISABLING_FACTOR: usize = 3; impl pallet_staking::Config for Runtime { type Currency = Balances; @@ -308,7 +309,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = (); type NextNewSession = Session; type MaxExposurePageSize = ConstU32<256>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = BagsList; @@ -320,6 +320,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl frame_system::offchain::SendTransactionTypes for Runtime @@ -871,7 +872,6 @@ pub(crate) fn on_offence_now( offenders, slash_fraction, Staking::eras_start_session_index(now).unwrap(), - DisableStrategy::WhenSlashed, ); } @@ -886,19 +886,16 @@ pub(crate) fn add_slash(who: &AccountId) { ); } -// Slashes enough validators to cross the `Staking::OffendingValidatorsThreshold`. -pub(crate) fn slash_through_offending_threshold() { - let validators = Session::validators(); - let mut remaining_slashes = - ::OffendingValidatorsThreshold::get() * - validators.len() as u32; +// Slashes 1/2 of the active set. Returns the `AccountId`s of the slashed validators. 
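+// E.g. with the 7-validator set used in `mass_slash_doesnt_enter_emergency_phase`, 3 validators
+// are slashed - more than the `(7 - 1) / 3 = 2` disabling limit, so the truncation path is hit.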
+pub(crate) fn slash_half_the_active_set() -> Vec { + let mut slashed = Session::validators(); + slashed.truncate(slashed.len() / 2); - for v in validators.into_iter() { - if remaining_slashes != 0 { - add_slash(&v); - remaining_slashes -= 1; - } + for v in slashed.iter() { + add_slash(v); } + + slashed } // Slashes a percentage of the active nominators that haven't been slashed yet, with diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index b731cb822f33..d876f9f6171e 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -134,7 +134,6 @@ impl pallet_staking::Config for Runtime { type NextNewSession = (); type HistoryDepth = ConstU32<84>; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = MockElection; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -145,6 +144,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct BalanceToU256; diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 4a21da655e5b..2d54f525b1f0 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -146,7 +146,6 @@ parameter_types! { pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub static ElectionsBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -176,7 +175,6 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; @@ -189,6 +187,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_offences::Config for Test { diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 239b47834d1f..f91a473e53d5 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -104,7 +104,7 @@ use sp_runtime::{ PerThing, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ - offence::{DisableStrategy, Kind, Offence, ReportOffence}, + offence::{Kind, Offence, ReportOffence}, SessionIndex, }; use sp_std::prelude::*; @@ -847,10 +847,6 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::Never - } - fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index f9959593494a..12333d59ef89 100644 --- 
a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -50,9 +50,6 @@ fn test_unresponsiveness_slash_fraction() { dummy_offence.slash_fraction(17), Perbill::from_parts(46200000), // 4.62% ); - - // Offline offences should never lead to being disabled. - assert_eq!(dummy_offence.disable_strategy(), DisableStrategy::Never); } #[test] diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index a59f8f3f40e7..2752d53a6b9f 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -111,7 +111,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -124,6 +123,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! { diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 2ec47e0d1645..93a05ddfae99 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -125,7 +125,6 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; @@ -138,6 +137,7 @@ impl pallet_staking::Config for Runtime { type EventListeners = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } parameter_types! 
{ diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 27129e73c71e..eeaa1364504a 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_im_online::Config for Test { diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index 1c7ffeca7198..a328b2fee4e2 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -132,7 +132,6 @@ where &concurrent_offenders, &slash_perbill, offence.session_index(), - offence.disable_strategy(), ); // Deposit the event. diff --git a/substrate/frame/offences/src/migration.rs b/substrate/frame/offences/src/migration.rs index 3b5cf3ce9269..199f47491369 100644 --- a/substrate/frame/offences/src/migration.rs +++ b/substrate/frame/offences/src/migration.rs @@ -23,7 +23,7 @@ use frame_support::{ weights::Weight, Twox64Concat, }; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; use sp_std::vec::Vec; #[cfg(feature = "try-runtime")] @@ -106,12 +106,7 @@ pub fn remove_deferred_storage() -> Weight { let deferred = >::take(); log::info!(target: LOG_TARGET, "have {} deferred offences, applying.", deferred.len()); for (offences, perbill, session) in deferred.iter() { - let consumed = T::OnOffenceHandler::on_offence( - offences, - perbill, - *session, - DisableStrategy::WhenSlashed, - ); + let consumed = T::OnOffenceHandler::on_offence(offences, perbill, *session); weight = weight.saturating_add(consumed); } diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 31d5f805f3e4..9a3120e41eaa 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -33,7 +33,7 @@ use sp_runtime::{ BuildStorage, Perbill, }; use sp_staking::{ - offence::{self, DisableStrategy, Kind, OffenceDetails}, + offence::{self, Kind, OffenceDetails}, SessionIndex, }; @@ -51,7 +51,6 @@ impl offence::OnOffenceHandler _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, - _disable_strategy: DisableStrategy, ) -> Weight { OnOffencePerbill::mutate(|f| { *f = slash_fraction.to_vec(); diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 24d259ed1d4a..6531080b8d10 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -33,7 +33,7 @@ use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; -use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; +use sp_staking::offence::OnOffenceHandler; pub use pallet::*; @@ -128,7 +128,7 @@ pub mod pallet { T::AccountId, IdentificationTuple, Weight, - 
>>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + >>::on_offence(&offenders, &slash_fraction, session_index); } } } diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 626db138c2bf..7e7332c3f7e3 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -133,7 +133,6 @@ parameter_types! { pub static SlashDeferDuration: EraIndex = 0; pub const BondingDuration: EraIndex = 3; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } impl pallet_staking::Config for Test { @@ -153,7 +152,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; @@ -165,6 +163,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl pallet_session::historical::Config for Test { diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 81052141fd86..6cefa8f39a8c 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -174,7 +174,6 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = ConstU32<64>; - type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; @@ -186,6 +185,7 @@ impl pallet_staking::Config for Test { type EventListeners = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } impl crate::Config for Test {} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 17b6aa7a4640..9506e98adf7d 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -627,7 +627,7 @@ impl Pallet { Validators::::put(&validators); if changed { - // reset disabled validators + // reset disabled validators if active set was changed >::take(); } diff --git a/substrate/frame/staking/CHANGELOG.md b/substrate/frame/staking/CHANGELOG.md index 719aa388755f..113b7a6200b6 100644 --- a/substrate/frame/staking/CHANGELOG.md +++ b/substrate/frame/staking/CHANGELOG.md @@ -7,6 +7,25 @@ on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). We maintain a single integer version number for staking pallet to keep track of all storage migrations. +## [v15] + +### Added + +- New trait `DisablingStrategy` which is responsible for making a decision which offenders should be + disabled on new offence. +- Default implementation of `DisablingStrategy` - `UpToLimitDisablingStrategy`. It + disables each new offender up to a threshold (1/3 by default). Offenders are not runtime disabled for + offences in previous era(s). 
But they will be low-priority node-side disabled for dispute initiation. +- `OffendingValidators` storage item is replaced with `DisabledValidators`. The former keeps all + offenders and if they are disabled or not. The latter just keeps a list of all offenders as they + are disabled by default. + +### Deprecated + +- `enum DisableStrategy` is no longer needed because disabling is not related to the type of the + offence anymore. A decision if a offender is disabled or not is made by a `DisablingStrategy` + implementation. + ## [v14] ### Added diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index f5b7e3eca3de..047ad6b87cc1 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -1239,3 +1239,79 @@ impl BenchmarkingConfig for TestBenchmarkingConfig { type MaxValidators = frame_support::traits::ConstU32<100>; type MaxNominators = frame_support::traits::ConstU32<100>; } + +/// Controls validator disabling +pub trait DisablingStrategy { + /// Make a disabling decision. Returns the index of the validator to disable or `None` if no new + /// validator should be disabled. + fn decision( + offender_stash: &T::AccountId, + slash_era: EraIndex, + currently_disabled: &Vec, + ) -> Option; +} + +/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a +/// threshold. `DISABLING_LIMIT_FACTOR` is the factor of the maximum disabled validators in the +/// active set. E.g. setting this value to `3` means no more than 1/3 of the validators in the +/// active set can be disabled in an era. +/// By default a factor of 3 is used which is the byzantine threshold. +pub struct UpToLimitDisablingStrategy; + +impl UpToLimitDisablingStrategy { + /// Disabling limit calculated from the total number of validators in the active set. When + /// reached no more validators will be disabled. 
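+ /// E.g. with the default `DISABLING_LIMIT_FACTOR` of 3 and an active set of 7 validators,
+ /// the limit is `(7 - 1) / 3 = 2`, i.e. at most 2 validators can be disabled at a time.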
+ pub fn disable_limit(validators_len: usize) -> usize { + validators_len + .saturating_sub(1) + .checked_div(DISABLING_LIMIT_FACTOR) + .unwrap_or_else(|| { + defensive!("DISABLING_LIMIT_FACTOR should not be 0"); + 0 + }) + } +} + +impl DisablingStrategy + for UpToLimitDisablingStrategy +{ + fn decision( + offender_stash: &T::AccountId, + slash_era: EraIndex, + currently_disabled: &Vec, + ) -> Option { + let active_set = T::SessionInterface::validators(); + + // We don't disable more than the limit + if currently_disabled.len() >= Self::disable_limit(active_set.len()) { + log!( + debug, + "Won't disable: reached disabling limit {:?}", + Self::disable_limit(active_set.len()) + ); + return None + } + + // We don't disable for offences in previous eras + if ActiveEra::::get().map(|e| e.index).unwrap_or_default() > slash_era { + log!( + debug, + "Won't disable: current_era {:?} > slash_era {:?}", + Pallet::::current_era().unwrap_or_default(), + slash_era + ); + return None + } + + let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) { + idx as u32 + } else { + log!(debug, "Won't disable: offender not in active set",); + return None + }; + + log!(debug, "Will disable {:?}", offender_idx); + + Some(offender_idx) + } +} diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index d5b18421d5b6..510252be26c9 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -20,9 +20,10 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::{ + migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, - traits::{GetStorageVersion, OnRuntimeUpgrade}, + traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}, }; #[cfg(feature = "try-runtime")] @@ -59,11 +60,61 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; +/// Migrating `OffendingValidators` from `Vec<(u32, bool)>` to `Vec` +pub mod v15 { + use super::*; + + // The disabling strategy used by staking pallet + type DefaultDisablingStrategy = UpToLimitDisablingStrategy; + + pub struct VersionUncheckedMigrateV14ToV15(sp_std::marker::PhantomData); + impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV14ToV15 { + fn on_runtime_upgrade() -> Weight { + let mut migrated = v14::OffendingValidators::::take() + .into_iter() + .filter(|p| p.1) // take only disabled validators + .map(|p| p.0) + .collect::>(); + + // Respect disabling limit + migrated.truncate(DefaultDisablingStrategy::disable_limit( + T::SessionInterface::validators().len(), + )); + + DisabledValidators::::set(migrated); + + log!(info, "v15 applied successfully."); + T::DbWeight::get().reads_writes(1, 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { + frame_support::ensure!( + v14::OffendingValidators::::decode_len().is_none(), + "OffendingValidators is not empty after the migration" + ); + Ok(()) + } + } + + pub type MigrateV14ToV15 = VersionedMigration< + 14, + 15, + VersionUncheckedMigrateV14ToV15, + Pallet, + ::DbWeight, + >; +} + /// Migration of era exposure storage items to paged exposures. 
/// Changelog: [v14.](https://github.com/paritytech/substrate/blob/ankan/paged-rewards-rebased2/frame/staking/CHANGELOG.md#14) pub mod v14 { use super::*; + #[frame_support::storage_alias] + pub(crate) type OffendingValidators = + StorageValue, Vec<(u32, bool)>, ValueQuery>; + pub struct MigrateToV14(core::marker::PhantomData); impl OnRuntimeUpgrade for MigrateToV14 { fn on_runtime_upgrade() -> Weight { @@ -73,10 +124,10 @@ pub mod v14 { if in_code == 14 && on_chain == 13 { in_code.put::>(); - log!(info, "v14 applied successfully."); + log!(info, "staking v14 applied successfully."); T::DbWeight::get().reads_writes(1, 1) } else { - log!(warn, "v14 not applied."); + log!(warn, "staking v14 not applied."); T::DbWeight::get().reads(1) } } diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index b46b863c016e..8c60dec65a81 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -34,7 +34,7 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_io; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, OnStakingUpdate, }; @@ -186,7 +186,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; - pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } parameter_types! { @@ -267,6 +266,9 @@ impl OnStakingUpdate for EventListenerMock { } } +// Disabling threshold for `UpToLimitDisablingStrategy` +pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3; + impl crate::pallet::pallet::Config for Test { type Currency = Balances; type CurrencyBalance = ::Balance; @@ -284,7 +286,6 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = MaxExposurePageSize; - type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. 
@@ -297,6 +298,7 @@ impl crate::pallet::pallet::Config for Test { type EventListeners = EventListenerMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); + type DisablingStrategy = pallet_staking::UpToLimitDisablingStrategy; } pub struct WeightedNominationsQuota; @@ -461,6 +463,8 @@ impl ExtBuilder { (31, self.balance_factor * 2000), (41, self.balance_factor * 2000), (51, self.balance_factor * 2000), + (201, self.balance_factor * 2000), + (202, self.balance_factor * 2000), // optional nominator (100, self.balance_factor * 2000), (101, self.balance_factor * 2000), @@ -488,8 +492,10 @@ impl ExtBuilder { (31, 31, self.balance_factor * 500, StakerStatus::::Validator), // an idle validator (41, 41, self.balance_factor * 1000, StakerStatus::::Idle), - ]; - // optionally add a nominator + (51, 51, self.balance_factor * 1000, StakerStatus::::Idle), + (201, 201, self.balance_factor * 1000, StakerStatus::::Idle), + (202, 202, self.balance_factor * 1000, StakerStatus::::Idle), + ]; // optionally add a nominator if self.nominate { stakers.push(( 101, @@ -728,12 +734,11 @@ pub(crate) fn on_offence_in_era( >], slash_fraction: &[Perbill], era: EraIndex, - disable_strategy: DisableStrategy, ) { let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session, disable_strategy); + let _ = Staking::on_offence(offenders, slash_fraction, start_session); return } else if bonded_era > era { break @@ -745,7 +750,6 @@ pub(crate) fn on_offence_in_era( offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap(), - disable_strategy, ); } else { panic!("cannot slash in era {}", era); @@ -760,7 +764,7 @@ pub(crate) fn on_offence_now( slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now, DisableStrategy::WhenSlashed) + on_offence_in_era(offenders, slash_fraction, now) } pub(crate) fn add_slash(who: &AccountId) { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 0c0ef0dbf463..f4d4a7133dd5 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -43,7 +43,7 @@ use sp_runtime::{ }; use sp_staking::{ currency_to_vote::CurrencyToVote, - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, OnStakingUpdate, Page, SessionIndex, Stake, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -505,10 +505,8 @@ impl Pallet { } // disable all offending validators that have been disabled for the whole era - for (index, disabled) in >::get() { - if disabled { - T::SessionInterface::disable_validator(index); - } + for index in >::get() { + T::SessionInterface::disable_validator(index); } } @@ -598,8 +596,8 @@ impl Pallet { >::insert(&active_era.index, validator_payout); T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); - // Clear offending validators. - >::kill(); + // Clear disabled validators. + >::kill(); } } @@ -868,14 +866,6 @@ impl Pallet { Self::deposit_event(Event::::ForceEra { mode }); } - /// Ensures that at the end of the current session there will be a new era. 
- pub(crate) fn ensure_new_era() { - match ForceEra::::get() { - Forcing::ForceAlways | Forcing::ForceNew => (), - _ => Self::set_force_era(Forcing::ForceNew), - } - } - #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, @@ -1447,7 +1437,6 @@ where >], slash_fraction: &[Perbill], slash_session: SessionIndex, - disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); let mut consumed_weight = Weight::from_parts(0, 0); @@ -1512,7 +1501,6 @@ where window_start, now: active_era, reward_proportion, - disable_strategy, }); Self::deposit_event(Event::::SlashReported { @@ -1986,7 +1974,8 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; Self::check_paged_exposures()?; - Self::check_count() + Self::check_count()?; + Self::ensure_disabled_validators_sorted() } /// Invariants: @@ -2300,4 +2289,12 @@ impl Pallet { Ok(()) } + + fn ensure_disabled_validators_sorted() -> Result<(), TryRuntimeError> { + ensure!( + DisabledValidators::::get().windows(2).all(|pair| pair[0] <= pair[1]), + "DisabledValidators is not sorted" + ); + Ok(()) + } } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 76ddad6f1359..9c968d883444 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -47,10 +47,11 @@ mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, - NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, - SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, + slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, DisablingStrategy, + EraPayout, EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, + MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + ValidatorPrefs, }; // The speculative number of spans are used as an input of the weight annotation of @@ -67,7 +68,7 @@ pub mod pallet { use super::*; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(14); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(15); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -217,10 +218,6 @@ pub mod pallet { #[pallet::constant] type MaxExposurePageSize: Get; - /// The fraction of the validator set that is safe to be offending. - /// After the threshold is reached a new era will be forced. - type OffendingValidatorsThreshold: Get; - /// Something that provides a best-effort sorted list of voters aka electing nominators, /// used for NPoS election. /// @@ -278,6 +275,9 @@ pub mod pallet { /// WARNING: this only reports slashing and withdraw events for the time being. type EventListeners: sp_staking::OnStakingUpdate>; + // `DisablingStragegy` controls how validators are disabled + type DisablingStrategy: DisablingStrategy; + /// Some parameters of the benchmarking. 
 		type BenchmarkingConfig: BenchmarkingConfig;
@@ -654,19 +654,16 @@ pub mod pallet {
 	#[pallet::getter(fn current_planned_session)]
 	pub type CurrentPlannedSession<T> = StorageValue<_, SessionIndex, ValueQuery>;

-	/// Indices of validators that have offended in the active era and whether they are currently
-	/// disabled.
+	/// Indices of validators that have offended in the active era. The offenders are disabled for
+	/// a whole era; they are kept here because only the staking pallet knows about eras. The
+	/// implementor of [`DisablingStrategy`] decides whether a validator should be disabled, which
+	/// implicitly means the implementor also controls the maximum number of disabled validators.
 	///
-	/// This value should be a superset of disabled validators since not all offences lead to the
-	/// validator being disabled (if there was no slash). This is needed to track the percentage of
-	/// validators that have offended in the current era, ensuring a new era is forced if
-	/// `OffendingValidatorsThreshold` is reached. The vec is always kept sorted so that we can find
-	/// whether a given validator has previously offended using binary search. It gets cleared when
-	/// the era ends.
+	/// The vec is always kept sorted so that we can find whether a given validator has previously
+	/// offended using binary search.
 	#[pallet::storage]
 	#[pallet::unbounded]
-	#[pallet::getter(fn offending_validators)]
-	pub type OffendingValidators<T: Config> = StorageValue<_, Vec<(u32, bool)>, ValueQuery>;
+	pub type DisabledValidators<T: Config> = StorageValue<_, Vec<u32>, ValueQuery>;

 	/// The threshold for when users can start calling `chill_other` for other validators /
 	/// nominators. The threshold is compared to the actual number of validators / nominators
diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs
index 2011e9eb8301..f831f625957d 100644
--- a/substrate/frame/staking/src/slashing.rs
+++ b/substrate/frame/staking/src/slashing.rs
@@ -50,21 +50,21 @@
 //! Based on research at <https://research.web3.foundation/en/latest/polkadot/slashing/npos.html>

 use crate::{
-	BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, NominatorSlashInEra,
-	OffendingValidators, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash,
+	BalanceOf, Config, DisabledValidators, DisablingStrategy, Error, Exposure, NegativeImbalanceOf,
+	NominatorSlashInEra, Pallet, Perbill, SessionInterface, SpanSlash, UnappliedSlash,
 	ValidatorSlashInEra,
 };
 use codec::{Decode, Encode, MaxEncodedLen};
 use frame_support::{
 	ensure,
-	traits::{Currency, Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced},
+	traits::{Currency, Defensive, DefensiveSaturating, Imbalance, OnUnbalanced},
 };
 use scale_info::TypeInfo;
 use sp_runtime::{
 	traits::{Saturating, Zero},
 	DispatchResult, RuntimeDebug,
 };
-use sp_staking::{offence::DisableStrategy, EraIndex};
+use sp_staking::EraIndex;
 use sp_std::vec::Vec;

 /// The proportion of the slashing reward to be paid out on the first slashing detection.
@@ -220,8 +220,6 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> {
 	/// The maximum percentage of a slash that ever gets paid out.
 	/// This is f_inf in the paper.
 	pub(crate) reward_proportion: Perbill,
-	/// When to disable offenders.
-	pub(crate) disable_strategy: DisableStrategy,
 }

 /// Computes a slash of a validator and nominators. It returns an unapplied
@@ -280,18 +278,13 @@ pub(crate) fn compute_slash<T: Config>(
 		let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash);

 		if target_span == Some(spans.span_index()) {
-			// misbehavior occurred within the current slashing span - take appropriate
-			// actions.
-
-			// chill the validator - it misbehaved in the current span and should
-			// not continue in the next election. also end the slashing span.
+			// misbehavior occurred within the current slashing span - end current span.
+			// Check <https://github.com/paritytech/polkadot-sdk/issues/2650> for details.
 			spans.end_span(params.now);
-			<Pallet<T>>::chill_stash(params.stash);
 		}
 	}

-	let disable_when_slashed = params.disable_strategy != DisableStrategy::Never;
-	add_offending_validator::<T>(params.stash, disable_when_slashed);
+	add_offending_validator::<T>(&params);

 	let mut nominators_slashed = Vec::new();
 	reward_payout += slash_nominators::<T>(params.clone(), prior_slash_p, &mut nominators_slashed);
@@ -320,54 +313,31 @@ fn kick_out_if_recent<T: Config>(params: SlashParams<T>) {
 	);

 	if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) {
+		// Check https://github.com/paritytech/polkadot-sdk/issues/2650 for details
 		spans.end_span(params.now);
-		<Pallet<T>>::chill_stash(params.stash);
 	}

-	let disable_without_slash = params.disable_strategy == DisableStrategy::Always;
-	add_offending_validator::<T>(params.stash, disable_without_slash);
+	add_offending_validator::<T>(&params);
 }

-/// Add the given validator to the offenders list and optionally disable it.
-/// If after adding the validator `OffendingValidatorsThreshold` is reached
-/// a new era will be forced.
-fn add_offending_validator<T: Config>(stash: &T::AccountId, disable: bool) {
-	OffendingValidators::<T>::mutate(|offending| {
-		let validators = T::SessionInterface::validators();
-		let validator_index = match validators.iter().position(|i| i == stash) {
-			Some(index) => index,
-			None => return,
-		};
-
-		let validator_index_u32 = validator_index as u32;
-
-		match offending.binary_search_by_key(&validator_index_u32, |(index, _)| *index) {
-			// this is a new offending validator
-			Err(index) => {
-				offending.insert(index, (validator_index_u32, disable));
-
-				let offending_threshold =
-					T::OffendingValidatorsThreshold::get() * validators.len() as u32;
-
-				if offending.len() >= offending_threshold as usize {
-					// force a new era, to select a new validator set
-					<Pallet<T>>::ensure_new_era()
-				}
-
-				if disable {
-					T::SessionInterface::disable_validator(validator_index_u32);
-				}
-			},
-			Ok(index) => {
-				if disable && !offending[index].1 {
-					// the validator had previously offended without being disabled,
-					// let's make sure we disable it now
-					offending[index].1 = true;
-					T::SessionInterface::disable_validator(validator_index_u32);
-				}
-			},
+/// Inform the [`DisablingStrategy`] implementation about the new offender and disable it if the
+/// strategy's [`DisablingStrategy::decision`] says so.
+fn add_offending_validator<T: Config>(params: &SlashParams<T>) {
+	DisabledValidators::<T>::mutate(|disabled| {
+		if let Some(offender) =
+			T::DisablingStrategy::decision(params.stash, params.slash_era, &disabled)
+		{
+			// Add the validator to `DisabledValidators` and disable it. Do nothing if it is
+			// already disabled.
+			if let Err(index) = disabled.binary_search_by_key(&offender, |index| *index) {
+				disabled.insert(index, offender);
+				T::SessionInterface::disable_validator(offender);
+			}
 		}
 	});
+
+	// `DisabledValidators` should be kept sorted
+	debug_assert!(DisabledValidators::<T>::get().windows(2).all(|pair| pair[0] < pair[1]));
 }

 /// Slash nominators.
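The sorted-vec idiom in `add_offending_validator` above is worth isolating: because `DisabledValidators` is kept sorted, a single `binary_search` doubles as the membership test and the computation of the insertion point. A minimal, self-contained illustration with plain `u32` indices (my sketch, not pallet code):

```rust
/// Insert `offender` into an already-sorted list, skipping duplicates.
/// `Err(pos)` from `binary_search` means "absent, belongs at `pos`";
/// `Ok(_)` means the validator is already disabled, so nothing happens.
/// The sorted invariant that the `debug_assert!` above checks is preserved.
fn insert_disabled(disabled: &mut Vec<u32>, offender: u32) {
	if let Err(pos) = disabled.binary_search(&offender) {
		disabled.insert(pos, offender);
	}
}

fn main() {
	let mut disabled = vec![1, 4, 7];
	insert_disabled(&mut disabled, 5);
	insert_disabled(&mut disabled, 4); // already present: no-op
	assert_eq!(disabled, vec![1, 4, 5, 7]);
}
```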
Accepts general parameters and the prior slash percentage of the validator. diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 87f6fd424bd7..6cf5a56e5a6d 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -38,7 +38,7 @@ use sp_runtime::{ Perbill, Percent, Perquintill, Rounding, TokenError, }; use sp_staking::{ - offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, SessionIndex, }; use sp_std::prelude::*; @@ -716,56 +716,65 @@ fn nominating_and_rewards_should_work() { #[test] fn nominators_also_get_slashed_pro_rata() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - - // staked values; - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + + // staked values; + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); - // both stakes must have been decreased. - assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); - assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); + // both stakes must have been decreased. + assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); + assert!(Staking::ledger(11.into()).unwrap().active < validator_stake); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. 
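+			// (Editor's worked example with assumed round numbers, to make the
+			// pro-rata split concrete: if the exposed stake is 1_000, of which the
+			// validator's own stake is 800 and the nominator's 200, a 5% slash
+			// removes 50 in total; the validator pays 800/1_000 of it, i.e. 40,
+			// and the nominator 200/1_000, i.e. 10.)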
+ assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!(Staking::ledger(101.into()).unwrap().active, nominator_stake - nominator_share); - assert_eq!(Staking::ledger(11.into()).unwrap().active, validator_stake - validator_share); - assert_eq!( - balances(&101).0, // free balance - nominator_balance - nominator_share, - ); - assert_eq!( - balances(&11).0, // free balance - validator_balance - validator_share, - ); - // Because slashing happened. - assert!(is_disabled(11)); - }); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); + assert_eq!( + balances(&101).0, // free balance + nominator_balance - nominator_share, + ); + assert_eq!( + balances(&11).0, // free balance + validator_balance - validator_share, + ); + // Because slashing happened. + assert!(is_disabled(11)); + }); } #[test] @@ -2401,7 +2410,7 @@ fn era_is_always_same_length() { } #[test] -fn offence_forces_new_era() { +fn offence_doesnt_force_new_era() { ExtBuilder::default().build_and_execute(|| { on_offence_now( &[OffenceDetails { @@ -2411,7 +2420,7 @@ fn offence_forces_new_era() { &[Perbill::from_percent(5)], ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2435,26 +2444,32 @@ fn offence_ensures_new_era_without_clobbering() { #[test] fn offence_deselects_validator_even_when_slash_is_zero() { - ExtBuilder::default().build_and_execute(|| { - assert!(Session::validators().contains(&11)); - assert!(>::contains_key(11)); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + assert!(Session::validators().contains(&11)); + assert!(>::contains_key(11)); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - assert!(!>::contains_key(11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(1); + mock::start_active_era(1); - assert!(!Session::validators().contains(&11)); - assert!(!>::contains_key(11)); - }); + // The validator should be reenabled in the new era + assert!(!is_disabled(11)); + }); } #[test] @@ -2479,71 +2494,70 @@ fn slashing_performed_according_exposure() { } #[test] -fn slash_in_old_span_does_not_deselect() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); +fn validator_is_not_disabled_for_an_offence_in_previous_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - 
assert!(!>::contains_key(11)); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - mock::start_active_era(2); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); - Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(!Session::validators().contains(&11)); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(is_disabled(11)); - mock::start_active_era(3); + mock::start_active_era(2); - // this staker is in a new slashing span now, having re-registered after - // their prior slash. + // the validator is not disabled in the new era + Staking::validate(RuntimeOrigin::signed(11), Default::default()).unwrap(); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - 1, - DisableStrategy::WhenSlashed, - ); + mock::start_active_era(3); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + // an offence committed in era 1 is reported in era 3 + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + 1, + ); - // but we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); + // the validator doesn't get disabled for an old offence + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - // NOTE: A 100% slash here would clean up the account, causing de-registration. - &[Perbill::from_percent(95)], - 1, - DisableStrategy::WhenSlashed, - ); + // and we are not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); - // the validator doesn't get chilled again - assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + // NOTE: A 100% slash here would clean up the account, causing de-registration. + &[Perbill::from_percent(95)], + 1, + ); - // but it's disabled - assert!(is_disabled(11)); - // and we are still forcing a new era - assert_eq!(Staking::force_era(), Forcing::ForceNew); - }); + // the validator doesn't get disabled again + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(!is_disabled(11)); + // and we are still not forcing a new era + assert_eq!(Staking::force_era(), Forcing::NotForcing); + }); } #[test] @@ -2671,7 +2685,7 @@ fn dont_slash_if_fraction_is_zero() { // The validator hasn't been slashed. The new era is not forced. assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); }); } @@ -2692,7 +2706,7 @@ fn only_slash_for_max_in_era() { // The validator has been slashed and has been force-chilled. 
assert_eq!(Balances::free_balance(11), 500); - assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_eq!(Staking::force_era(), Forcing::NotForcing); on_offence_now( &[OffenceDetails { @@ -2833,7 +2847,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(10)], 2, - DisableStrategy::WhenSlashed, ); assert_eq!(Balances::free_balance(11), 900); @@ -2860,7 +2873,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(30)], 3, - DisableStrategy::WhenSlashed, ); // 11 was not further slashed, but 21 and 101 were. @@ -2882,7 +2894,6 @@ fn slashing_nominators_by_span_max() { }], &[Perbill::from_percent(20)], 2, - DisableStrategy::WhenSlashed, ); // 11 was further slashed, but 21 and 101 were not. @@ -2999,11 +3010,8 @@ fn deferred_slashes_are_deferred() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, Event::StakersElected, - Event::ForceEra { mode: Forcing::NotForcing }, .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } @@ -3029,7 +3037,6 @@ fn retroactive_deferred_slashes_two_eras_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 1, // should be deferred for two full eras, and applied at the beginning of era 4. - DisableStrategy::Never, ); mock::start_active_era(4); @@ -3037,8 +3044,6 @@ fn retroactive_deferred_slashes_two_eras_before() { assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, Event::SlashReported { validator: 11, slash_era: 1, .. }, .., Event::Slashed { staker: 11, amount: 100 }, @@ -3067,7 +3072,6 @@ fn retroactive_deferred_slashes_one_before() { &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], 2, // should be deferred for two full eras, and applied at the beginning of era 5. 
- DisableStrategy::Never, ); mock::start_active_era(4); @@ -3197,7 +3201,6 @@ fn remove_deferred() { &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], &[Perbill::from_percent(15)], 1, - DisableStrategy::WhenSlashed, ); // fails if empty @@ -3312,192 +3315,198 @@ fn remove_multi_deferred() { #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - // 100 has approval for 11 as of now - assert!(Staking::nominators(101).unwrap().targets.contains(&11)); + // pre-slash balance + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 and 21 both have the support of 100 - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // 100 has approval for 11 as of now + assert!(Staking::nominators(101).unwrap().targets.contains(&11)); - assert_eq!(exposure_11.total, 1000 + 125); - assert_eq!(exposure_21.total, 1000 + 375); + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::from_percent(10)], - ); + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(10), - slash_era: 1 - }, - Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, - ] - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); - // post-slash balance - let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(10), + slash_era: 1 + }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, + ] + ); - // check that validator was chilled. 
- assert!(Validators::::iter().all(|(stash, _)| stash != 11)); + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11); - // actually re-bond the slashed validator - assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); + // check that validator was disabled. + assert!(is_disabled(11)); - mock::start_active_era(2); - let exposure_11 = Staking::eras_stakers(active_era(), &11); - let exposure_21 = Staking::eras_stakers(active_era(), &21); + // actually re-bond the slashed validator + assert_ok!(Staking::validate(RuntimeOrigin::signed(11), Default::default())); - // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 - // 900 + 146 - assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); - // 1000 + 342 - assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); - assert_eq!(500 - 146 - 342, nominator_slash_amount_11); - }); + mock::start_active_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + // 11's own expo is reduced. sum of support from 11 is less (448), which is 500 + // 900 + 146 + assert!(matches!(exposure_11, Exposure { own: 900, total: 1046, .. })); + // 1000 + 342 + assert!(matches!(exposure_21, Exposure { own: 1000, total: 1342, .. })); + assert_eq!(500 - 146 - 342, nominator_slash_amount_11); + }); } #[test] -fn non_slashable_offence_doesnt_disable_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); +fn non_slashable_offence_disables_validator() { + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - // offence with no slash associated - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); + // offence with no slash associated + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); - // it does NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it does NOT affect the nominator. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond - on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - ); + // offence that slashes 25% of the bond + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - // it DOES NOT affect the nominator. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // it DOES NOT affect the nominator. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 wasn't slashable so it wasn't disabled - assert!(!is_disabled(11)); - // whereas validator 20 gets disabled - assert!(is_disabled(21)); - }); + // the offence for validator 11 wasn't slashable but it is disabled + assert!(is_disabled(11)); + // validator 21 gets disabled too + assert!(is_disabled(21)); + }); } #[test] fn slashing_independent_of_disabling_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); + ExtBuilder::default() + .validator_count(5) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let now = Staking::active_era().unwrap().index; + let now = Staking::active_era().unwrap().index; - // offence with no slash associated, BUT disabling - on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - now, - DisableStrategy::Always, - ); + // offence with no slash associated + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // offence that slashes 25% of the bond, BUT not disabling - on_offence_in_era( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], - &[Perbill::from_percent(25)], - now, - DisableStrategy::Never, - ); + // offence that slashes 25% of the bond + on_offence_in_era( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + now, + ); - // nomination remains untouched. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + // nomination remains untouched. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::StakersElected, - Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::Chilled { stash: 11 }, - Event::ForceEra { mode: Forcing::ForceNew }, - Event::SlashReported { - validator: 11, - fraction: Perbill::from_percent(0), - slash_era: 1 - }, - Event::Chilled { stash: 21 }, - Event::SlashReported { - validator: 21, - fraction: Perbill::from_percent(25), - slash_era: 1 - }, - Event::Slashed { staker: 21, amount: 250 }, - Event::Slashed { staker: 101, amount: 94 } - ] - ); + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, + Event::SlashReported { + validator: 11, + fraction: Perbill::from_percent(0), + slash_era: 1 + }, + Event::SlashReported { + validator: 21, + fraction: Perbill::from_percent(25), + slash_era: 1 + }, + Event::Slashed { staker: 21, amount: 250 }, + Event::Slashed { staker: 101, amount: 94 } + ] + ); - // the offence for validator 10 was explicitly disabled - assert!(is_disabled(11)); - // whereas validator 21 is explicitly not disabled - assert!(!is_disabled(21)); - }); + // first validator is disabled but not slashed + assert!(is_disabled(11)); + // second validator is slashed but not disabled + assert!(!is_disabled(21)); + }); } #[test] -fn offence_threshold_triggers_new_era() { +fn offence_threshold_doesnt_trigger_new_era() { ExtBuilder::default() .validator_count(4) .set_status(41, StakerStatus::Validator) @@ -3506,12 +3515,14 @@ fn offence_threshold_triggers_new_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); assert_eq!( - ::OffendingValidatorsThreshold::get(), - Perbill::from_percent(75), + UpToLimitDisablingStrategy::::disable_limit( + Session::validators().len() + ), + 1 ); - // we have 4 validators and an offending validator threshold of 75%, - // once the third validator commits an offence a new era should be forced + // we have 4 validators and an offending validator threshold of 1/3, + // even if the third validator commits an offence a new era should not be forced let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); @@ -3522,6 +3533,9 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 11 should be disabled because the byzantine threshold is 1 + assert!(is_disabled(11)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3529,6 +3543,10 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); + // 21 should not be disabled because the number of disabled validators will be above the + // byzantine threshold + assert!(!is_disabled(21)); + assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( @@ -3536,28 +3554,29 @@ fn offence_threshold_triggers_new_era() { &[Perbill::zero()], ); - assert_eq!(ForceEra::::get(), Forcing::ForceNew); + // same for 31 + assert!(!is_disabled(31)); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); } #[test] fn disabled_validators_are_kept_disabled_for_whole_era() { ExtBuilder::default() - .validator_count(4) + .validator_count(7) .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) .build_and_execute(|| { mock::start_active_era(1); - 
assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], - &[Perbill::zero()], - ); - on_offence_now( &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3566,18 +3585,15 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { // nominations are not updated. assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - // validator 11 should not be disabled since the offence wasn't slashable - assert!(!is_disabled(11)); // validator 21 gets disabled since it got slashed assert!(is_disabled(21)); advance_session(); // disabled validators should carry-on through all sessions in the era - assert!(!is_disabled(11)); assert!(is_disabled(21)); - // validator 11 should now get disabled + // validator 11 commits an offence on_offence_now( &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], &[Perbill::from_percent(25)], @@ -3687,27 +3703,34 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(active_era(), &11); - assert_eq!(Balances::free_balance(101), 2000); + let exposure = Staking::eras_stakers(active_era(), &11); + assert_eq!(Balances::free_balance(101), 2000); - on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], - &[Perbill::from_percent(0)], - ); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(0)], + ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // 11 is still removed.. - assert!(Validators::::iter().all(|(stash, _)| stash != 11)); - // but their nominations are kept. - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - }); + // 11 is not removed but disabled + assert!(Validators::::iter().any(|(stash, _)| stash == 11)); + assert!(is_disabled(11)); + // and their nominations are kept. 
+ assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + }); } #[test] @@ -4710,7 +4733,7 @@ fn offences_weight_calculated_correctly() { let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); assert_eq!( - Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), + Staking::on_offence(&[], &[Perbill::from_percent(50)], 0), zero_offence_weight ); @@ -4735,7 +4758,6 @@ fn offences_weight_calculated_correctly() { &offenders, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed ), n_offence_unapplied_weight ); @@ -4765,7 +4787,6 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -7011,62 +7032,71 @@ mod staking_unchecked { #[test] fn virtual_nominators_are_lazily_slashed() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(1); - let slash_percent = Perbill::from_percent(5); - let initial_exposure = Staking::eras_stakers(active_era(), &11); - // 101 is a nominator for 11 - assert_eq!(initial_exposure.others.first().unwrap().who, 101); - // make 101 a virtual nominator - ::migrate_to_virtual_staker(&101); - // set payee different to self. - assert_ok!(::update_payee(&101, &102)); - - // cache values - let nominator_stake = Staking::ledger(101.into()).unwrap().active; - let nominator_balance = balances(&101).0; - let validator_stake = Staking::ledger(11.into()).unwrap().active; - let validator_balance = balances(&11).0; - let exposed_stake = initial_exposure.total; - let exposed_validator = initial_exposure.own; - let exposed_nominator = initial_exposure.others.first().unwrap().value; - - // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + ExtBuilder::default() + .validator_count(7) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .set_status(201, StakerStatus::Validator) + .set_status(202, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + let slash_percent = Perbill::from_percent(5); + let initial_exposure = Staking::eras_stakers(active_era(), &11); + // 101 is a nominator for 11 + assert_eq!(initial_exposure.others.first().unwrap().who, 101); + // make 101 a virtual nominator + ::migrate_to_virtual_staker(&101); + // set payee different to self. 
+ assert_ok!(::update_payee(&101, &102)); + + // cache values + let nominator_stake = Staking::ledger(101.into()).unwrap().active; + let nominator_balance = balances(&101).0; + let validator_stake = Staking::ledger(11.into()).unwrap().active; + let validator_balance = balances(&11).0; + let exposed_stake = initial_exposure.total; + let exposed_validator = initial_exposure.own; + let exposed_nominator = initial_exposure.others.first().unwrap().value; + + // 11 goes offline + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); - let slash_amount = slash_percent * exposed_stake; - let validator_share = - Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; - let nominator_share = - Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; + let slash_amount = slash_percent * exposed_stake; + let validator_share = + Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount; + let nominator_share = + Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount; - // both slash amounts need to be positive for the test to make sense. - assert!(validator_share > 0); - assert!(nominator_share > 0); + // both slash amounts need to be positive for the test to make sense. + assert!(validator_share > 0); + assert!(nominator_share > 0); - // both stakes must have been decreased pro-rata. - assert_eq!( - Staking::ledger(101.into()).unwrap().active, - nominator_stake - nominator_share - ); - assert_eq!( - Staking::ledger(11.into()).unwrap().active, - validator_stake - validator_share - ); + // both stakes must have been decreased pro-rata. + assert_eq!( + Staking::ledger(101.into()).unwrap().active, + nominator_stake - nominator_share + ); + assert_eq!( + Staking::ledger(11.into()).unwrap().active, + validator_stake - validator_share + ); - // validator balance is slashed as usual - assert_eq!(balances(&11).0, validator_balance - validator_share); - // Because slashing happened. - assert!(is_disabled(11)); + // validator balance is slashed as usual + assert_eq!(balances(&11).0, validator_balance - validator_share); + // Because slashing happened. + assert!(is_disabled(11)); - // but virtual nominator's balance is not slashed. - assert_eq!(Balances::free_balance(&101), nominator_balance); - // but slash is broadcasted to slash observers. - assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); - }) + // but virtual nominator's balance is not slashed. + assert_eq!(Balances::free_balance(&101), nominator_balance); + // but slash is broadcasted to slash observers. 
+ assert_eq!(SlashObserver::get().get(&101).unwrap(), &nominator_share); + }) } } mod ledger { @@ -7926,3 +7956,69 @@ mod ledger_recovery { }) } } + +mod byzantine_threshold_disabling_strategy { + use crate::{ + tests::Test, ActiveEra, ActiveEraInfo, DisablingStrategy, UpToLimitDisablingStrategy, + }; + use sp_staking::EraIndex; + + // Common test data - the stash of the offending validator, the era of the offence and the + // active set + const OFFENDER_ID: ::AccountId = 7; + const SLASH_ERA: EraIndex = 1; + const ACTIVE_SET: [::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7]; + const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set + + #[test] + fn dont_disable_for_ancient_offence() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + ActiveEra::::put(ActiveEraInfo { index: 2, start: None }); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn dont_disable_beyond_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1, 2]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert!(disable_offender.is_none()); + }); + } + + #[test] + fn disable_when_below_byzantine_threshold() { + sp_io::TestExternalities::default().execute_with(|| { + let initially_disabled = vec![1]; + pallet_session::Validators::::put(ACTIVE_SET.to_vec()); + + let disable_offender = + >::decision( + &OFFENDER_ID, + SLASH_ERA, + &initially_disabled, + ); + + assert_eq!(disable_offender, Some(OFFENDER_VALIDATOR_IDX)); + }); + } +} diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 30d96d0cbafc..2c2ebc1fc971 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -37,29 +37,6 @@ pub type Kind = [u8; 16]; /// so that we can slash it accordingly. pub type OffenceCount = u32; -/// In case of an offence, which conditions get an offending validator disabled. -#[derive( - Clone, - Copy, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - Encode, - Decode, - sp_runtime::RuntimeDebug, - scale_info::TypeInfo, -)] -pub enum DisableStrategy { - /// Independently of slashing, this offence will not disable the offender. - Never, - /// Only disable the offender if it is also slashed. - WhenSlashed, - /// Independently of slashing, this offence will always disable the offender. - Always, -} - /// A trait implemented by an offence report. /// /// This trait assumes that the offence is legitimate and was validated already. @@ -102,11 +79,6 @@ pub trait Offence { /// number. Note that for GRANDPA the round number is reset each epoch. fn time_slot(&self) -> Self::TimeSlot; - /// In which cases this offence needs to disable offenders until the next era starts. - fn disable_strategy(&self) -> DisableStrategy { - DisableStrategy::WhenSlashed - } - /// A slash fraction of the total exposure that should be slashed for this /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// @@ -177,15 +149,12 @@ pub trait OnOffenceHandler { /// /// The `session` parameter is the session index of the offence. /// - /// The `disable_strategy` parameter decides if the offenders need to be disabled immediately. 
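The three `byzantine_threshold_disabling_strategy` tests above pin down the disabling-limit arithmetic. A self-contained restatement (my sketch of the behaviour the tests assert, under the assumption that the limit is computed as `(n - 1) / 3`; the upstream implementation may differ in form):

```rust
/// Sketch: strictly fewer than one third of the active set may be disabled.
fn disable_limit(active_set_len: usize) -> usize {
	active_set_len.saturating_sub(1) / 3
}

fn main() {
	// 4 validators -> limit 1, matching `offence_threshold_doesnt_trigger_new_era`.
	assert_eq!(disable_limit(4), 1);
	// 7 validators -> limit 2: with one validator already disabled the offender
	// still gets disabled; with two already disabled it does not.
	assert_eq!(disable_limit(7), 2);
}
```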
-	///
 	/// The receiver might decide to not accept this offence. In this case, the call site is
 	/// responsible for queuing the report and re-submitting again.
 	fn on_offence(
 		offenders: &[OffenceDetails<Reporter, Offender>],
 		slash_fraction: &[Perbill],
 		session: SessionIndex,
-		disable_strategy: DisableStrategy,
 	) -> Res;
 }

@@ -194,7 +163,6 @@ impl<Reporter, Offender, Res: Default> OnOffenceHandler<Reporter, Offender, Res>
 		_offenders: &[OffenceDetails<Reporter, Offender>],
 		_slash_fraction: &[Perbill],
 		_session: SessionIndex,
-		_disable_strategy: DisableStrategy,
 	) -> Res {
 		Default::default()
 	}

From d893cde2cfd1992a3e589614ae09088d92f28a59 Mon Sep 17 00:00:00 2001
From: Ron
Date: Fri, 26 Apr 2024 23:51:58 +0800
Subject: [PATCH 69/74] Snowbridge: deposit extra fee to beneficiary on Asset
 Hub (#4175)

Just the upstream of https://github.com/Snowfork/polkadot-sdk/pull/137; see
there for more context.

---------

Co-authored-by: Clara van Staden
Co-authored-by: Adrian Catangiu
---
 .../primitives/router/src/inbound/mod.rs      |   6 +-
 .../bridge-hub-rococo/src/tests/snowbridge.rs | 133 +++++++++++++++++-
 prdoc/pr_4175.prdoc                           |  13 ++
 3 files changed, 149 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_4175.prdoc

diff --git a/bridges/snowbridge/primitives/router/src/inbound/mod.rs b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
index c20554c6d184..54e47a7a8b6a 100644
--- a/bridges/snowbridge/primitives/router/src/inbound/mod.rs
+++ b/bridges/snowbridge/primitives/router/src/inbound/mod.rs
@@ -273,8 +273,10 @@ where
 			},
 			None => {
 				instructions.extend(vec![
-					// Deposit asset to beneficiary.
-					DepositAsset { assets: Definite(asset.into()), beneficiary },
+					// Deposit both the asset and the fees to the beneficiary so the fees will
+					// not get trapped. Another benefit: fees left over beyond the ED on AssetHub
+					// can be used to create the beneficiary account in case it does not exist.
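+					// (Editor's note, not part of the original patch: `Wild(AllCounted(2))`
+					// matches whatever remains in the Holding register, up to two distinct
+					// assets - the bridged token plus any leftover fee - whereas the old
+					// `Definite(asset.into())` matched the token alone and stranded the fee.)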
+ DepositAsset { assets: Wild(AllCounted(2)), beneficiary }, ]); }, } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d0c02e611349..1c1c51404aa4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -27,7 +27,7 @@ use snowbridge_pallet_inbound_queue_fixtures::{ }; use snowbridge_pallet_system; use snowbridge_router_primitives::inbound::{ - Command, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, + Command, Destination, GlobalConsensusEthereumConvertsFor, MessageV1, VersionedMessage, }; use sp_core::H256; use sp_runtime::{DispatchError::Token, TokenError::FundsUnavailable}; @@ -40,6 +40,7 @@ const TREASURY_ACCOUNT: [u8; 32] = const WETH: [u8; 20] = hex!("87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d"); const ETHEREUM_DESTINATION_ADDRESS: [u8; 20] = hex!("44a57ee2f2FCcb85FDa2B0B18EBD0D8D2333700e"); const INSUFFICIENT_XCM_FEE: u128 = 1000; +const XCM_FEE: u128 = 4_000_000_000; #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum ControlCall { @@ -555,3 +556,133 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { ); }); } + +fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { + let weth_asset_location: Location = Location::new( + 2, + [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], + ); + // (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }) + // Fund asset hub sovereign on bridge hub + let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( + 1, + [Parachain(AssetHubRococo::para_id().into())], + )); + BridgeHubRococo::fund_accounts(vec![(asset_hub_sovereign.clone(), INITIAL_FUND)]); + + // Register WETH + AssetHubRococo::execute_with(|| { + type RuntimeOrigin = ::RuntimeOrigin; + + assert_ok!(::ForeignAssets::force_create( + RuntimeOrigin::root(), + weth_asset_location.clone().try_into().unwrap(), + asset_hub_sovereign.into(), + false, + 1, + )); + + assert!(::ForeignAssets::asset_exists( + weth_asset_location.clone().try_into().unwrap(), + )); + }); + + // Send WETH to an existent account on asset hub + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + type EthereumInboundQueue = + ::EthereumInboundQueue; + let message_id: H256 = [0; 32].into(); + let message = VersionedMessage::V1(MessageV1 { + chain_id: CHAIN_ID, + command: Command::SendToken { + token: WETH.into(), + destination: Destination::AccountId32 { id: account_id }, + amount: 1_000_000, + fee, + }, + }); + let (xcm, _) = EthereumInboundQueue::do_convert(message_id, message).unwrap(); + assert_ok!(EthereumInboundQueue::send_xcm(xcm, AssetHubRococo::para_id().into())); + + // Check that the message was sent + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
+				}) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_token_from_ethereum_to_existent_account_on_asset_hub() {
+	send_token_from_ethereum_to_asset_hub_with_fee(AssetHubRococoSender::get().into(), XCM_FEE);
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_token_from_ethereum_to_non_existent_account_on_asset_hub() {
+	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], XCM_FEE);
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		// Check that the token was received and issued as a foreign asset on AssetHub
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficient_fee() {
+	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], INSUFFICIENT_XCM_FEE);
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		// Check that the message was not processed successfully due to insufficient fee
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {},
+			]
+		);
+	});
+}
+
+#[test]
+fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed(
+) {
+	// On AH the xcm fee is 33_873_024 and the ED is 3_300_000
+	send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 36_000_000);
+
+	AssetHubRococo::execute_with(|| {
+		type RuntimeEvent = <AssetHubRococo as Chain>::RuntimeEvent;
+
+		// Check that the message was not processed successfully due to insufficient ED
+		assert_expected_events!(
+			AssetHubRococo,
+			vec![
+				RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success:false, .. }) => {},
+			]
+		);
+	});
+}
diff --git a/prdoc/pr_4175.prdoc b/prdoc/pr_4175.prdoc
new file mode 100644
index 000000000000..7fc2fb68b38e
--- /dev/null
+++ b/prdoc/pr_4175.prdoc
@@ -0,0 +1,13 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: "Snowbridge: deposit extra fee to beneficiary on Asset Hub"
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      Snowbridge transfers arriving on Asset Hub deposit both the asset and the fees to the beneficiary so the fees will not get trapped.
+      Another benefit: fees left over beyond the ED can be used to create the beneficiary account on Asset Hub in case it does not exist.
+
+crates:
+  - name: snowbridge-router-primitives

From 2a497d297575947b613fe0f3bbac9273a48fd6b0 Mon Sep 17 00:00:00 2001
From: antiyro <74653697+antiyro@users.noreply.github.com>
Date: Fri, 26 Apr 2024 18:23:58 +0200
Subject: [PATCH 70/74] fix(seal): shameless fix on sealing typo (#4304)

---
 substrate/client/consensus/manual-seal/src/lib.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs
index c3d360f07197..8fc7e7ecab2f 100644
--- a/substrate/client/consensus/manual-seal/src/lib.rs
+++ b/substrate/client/consensus/manual-seal/src/lib.rs
@@ -86,7 +86,7 @@ where
 	BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry)
 }

-/// Params required to start the instant sealing authorship task.
+/// Params required to start the manual sealing authorship task.
 pub struct ManualSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC, CS, CIDP, P> {
 	/// Block import instance.
 	pub block_import: BI,
@@ -114,7 +114,7 @@ pub struct ManualSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC, C
 	pub create_inherent_data_providers: CIDP,
 }

-/// Params required to start the manual sealing authorship task.
+/// Params required to start the instant sealing authorship task.
 pub struct InstantSealParams<B: BlockT, BI, E, C: ProvideRuntimeApi<B>, TP, SC, CIDP, P> {
 	/// Block import instance for importing blocks.
 	pub block_import: BI,

From 73b9a8391fa0b18308fa35f905e31cec77f5618f Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Sun, 28 Apr 2024 14:35:51 +0200
Subject: [PATCH 71/74] [Staking] Runtime API to check if era rewards are
 pending to be claimed (#4301)

Closes https://github.com/paritytech/polkadot-sdk/issues/426. Related to
https://github.com/paritytech/polkadot-sdk/pull/1189.

This helps offchain programs query whether there are unclaimed pages of
rewards for a given era. The logic could look like below:

```js
// loop until all reward pages for the era are claimed
while (api.call.stakingApi.pendingRewards(era, validator_stash)) {
  api.tx.staking.payout_stakers(validator_stash, era)
}
```

---
 polkadot/runtime/westend/src/lib.rs           |   4 +
 prdoc/pr_4301.prdoc                           |  13 +++
 substrate/bin/node/runtime/src/lib.rs         |   4 +
 .../frame/staking/runtime-api/src/lib.rs      |   5 +-
 substrate/frame/staking/src/lib.rs            |  28 ++++-
 substrate/frame/staking/src/pallet/impls.rs   |   4 +
 substrate/frame/staking/src/tests.rs          | 107 ++++++++++++++++++
 7 files changed, 163 insertions(+), 2 deletions(-)
 create mode 100644 prdoc/pr_4301.prdoc

diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index 03ecd5c070b2..de961bb4c398 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -2250,6 +2250,10 @@ sp_api::impl_runtime_apis! {
 		fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page {
 			Staking::api_eras_stakers_page_count(era, account)
 		}
+
+		fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool {
+			Staking::api_pending_rewards(era, account)
+		}
 	}

 	#[cfg(feature = "try-runtime")]
diff --git a/prdoc/pr_4301.prdoc b/prdoc/pr_4301.prdoc
new file mode 100644
index 000000000000..2ca2534243a8
--- /dev/null
+++ b/prdoc/pr_4301.prdoc
@@ -0,0 +1,13 @@
+title: New runtime API to check if a validator has pending pages of rewards for an era.
+
+doc:
+  - audience:
+    - Node Dev
+    - Runtime User
+    description: |
+      Creates a new runtime API to check if a reward for an era is pending for a validator.
+      Era rewards are paged, and this API will return true as long as there
+      are one or more unclaimed pages of era rewards.
+
+crates:
+- name: pallet-staking
+- name: pallet-staking-runtime-api
diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs
index 0caaa8c73226..5d8016532a5d 100644
--- a/substrate/bin/node/runtime/src/lib.rs
+++ b/substrate/bin/node/runtime/src/lib.rs
@@ -2791,6 +2791,10 @@ impl_runtime_apis! {
 		fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page {
 			Staking::api_eras_stakers_page_count(era, account)
 		}
+
+		fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool {
+			Staking::api_pending_rewards(era, account)
+		}
 	}

 	impl sp_consensus_babe::BabeApi<Block> for Runtime {
diff --git a/substrate/frame/staking/runtime-api/src/lib.rs b/substrate/frame/staking/runtime-api/src/lib.rs
index b04c383a077d..7955f4184a43 100644
--- a/substrate/frame/staking/runtime-api/src/lib.rs
+++ b/substrate/frame/staking/runtime-api/src/lib.rs
@@ -30,7 +30,10 @@ sp_api::decl_runtime_apis! {
 		/// Returns the nominations quota for a nominator with a given balance.
 		fn nominations_quota(balance: Balance) -> u32;

-		/// Returns the page count of exposures for a validator in a given era.
+		/// Returns the page count of exposures for a validator `account` in a given era.
 		fn eras_stakers_page_count(era: sp_staking::EraIndex, account: AccountId) -> sp_staking::Page;
+
+		/// Returns true if validator `account` has pages to be claimed for the given era.
+		fn pending_rewards(era: sp_staking::EraIndex, account: AccountId) -> bool;
 	}
 }
diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs
index 047ad6b87cc1..692e62acfdff 100644
--- a/substrate/frame/staking/src/lib.rs
+++ b/substrate/frame/staking/src/lib.rs
@@ -1035,11 +1035,37 @@ where
 /// can and add more functions to it as needed.
 pub struct EraInfo<T>(sp_std::marker::PhantomData<T>);
 impl<T: Config> EraInfo<T> {
+	/// Returns true if the validator has one or more pages of era rewards not claimed yet.
+	// Also looks at legacy storage that can be cleaned up after #433.
+	pub fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool {
+		let page_count = if let Some(overview) = <ErasStakersOverview<T>>::get(&era, validator) {
+			overview.page_count
+		} else {
+			if <ErasStakers<T>>::contains_key(era, validator) {
+				// this means non-paged exposure; treat it as a single page.
+				1
+			} else {
+				// if no exposure, then no rewards to claim.
+				return false
+			}
+		};
+
+		// check if era is marked claimed in legacy storage.
+		if <Ledger<T>>::get(validator)
+			.map(|l| l.legacy_claimed_rewards.contains(&era))
+			.unwrap_or_default()
+		{
+			return false
+		}
+
+		ClaimedRewards::<T>::get(era, validator).len() < page_count as usize
+	}
+
 	/// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy
 	/// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be
 	/// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards
 	/// are relevant/claimable.
- // Refer tracker issue for cleanup: #13034 + // Refer tracker issue for cleanup: https://github.com/paritytech/polkadot-sdk/issues/433 pub(crate) fn is_rewards_claimed_with_legacy_fallback( era: EraIndex, ledger: &StakingLedger, diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index f4d4a7133dd5..4eb24311ab34 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1183,6 +1183,10 @@ impl Pallet { pub fn api_eras_stakers_page_count(era: EraIndex, account: T::AccountId) -> Page { EraInfo::::get_page_count(era, &account) } + + pub fn api_pending_rewards(era: EraIndex, account: T::AccountId) -> bool { + EraInfo::::pending_rewards(era, &account) + } } impl ElectionDataProvider for Pallet { diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 6cf5a56e5a6d..d05752f54be7 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -6796,6 +6796,113 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( }); } +#[test] +fn test_runtime_api_pending_rewards() { + ExtBuilder::default().build_and_execute(|| { + // GIVEN + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + let stake = 100; + + // validator with non-paged exposure, rewards marked in legacy claimed rewards. + let validator_one = 301; + // validator with non-paged exposure, rewards marked in paged claimed rewards. + let validator_two = 302; + // validator with paged exposure. + let validator_three = 303; + + // Set staker + for v in validator_one..=validator_three { + let _ = Balances::make_free_balance_be(&v, stake); + assert_ok!(Staking::bond(RuntimeOrigin::signed(v), stake, RewardDestination::Staked)); + } + + // Add reward points + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(validator_one, 1), (validator_two, 1), (validator_three, 1)] + .into_iter() + .collect(), + }; + ErasRewardPoints::::insert(0, reward); + + // build exposure + let mut individual_exposures: Vec> = vec![]; + for i in 0..=MaxExposurePageSize::get() { + individual_exposures.push(IndividualExposure { who: i.into(), value: stake }); + } + let exposure = Exposure:: { + total: stake * (MaxExposurePageSize::get() as Balance + 2), + own: stake, + others: individual_exposures, + }; + + // add non-paged exposure for one and two. + >::insert(0, validator_one, exposure.clone()); + >::insert(0, validator_two, exposure.clone()); + // add paged exposure for third validator + EraInfo::::set_exposure(0, &validator_three, exposure); + + // add some reward to be distributed + ErasValidatorReward::::insert(0, 1000); + + // mark rewards claimed for validator_one in legacy claimed rewards + >::insert( + validator_one, + StakingLedgerInspect { + stash: validator_one, + total: stake, + active: stake, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![0], + }, + ); + + // SCENARIO ONE: rewards already marked claimed in legacy storage. + // runtime api should return false for pending rewards for validator_one. + assert!(!EraInfo::::pending_rewards(0, &validator_one)); + // and if we try to pay, we get an error. + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO TWO: non-paged exposure + // validator two has not claimed rewards, so pending rewards is true. 
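+		// (Editor's note: a non-paged exposure counts as a single page, so for
+		// validator two a single successful `payout_stakers` call is enough to
+		// flip `pending_rewards` back to false, as the next asserts check.)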
+ assert!(EraInfo::<Test>::pending_rewards(0, &validator_two)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + // now pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0), + Error::<Test>::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO THREE: validator with paged exposure (two pages). + // validator three has not claimed rewards, so pending rewards is true. + assert!(EraInfo::<Test>::pending_rewards(0, &validator_three)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // validator three has two pages of exposure, so pending rewards is still true. + assert!(EraInfo::<Test>::pending_rewards(0, &validator_three)); + // payout again + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // now pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0), + Error::<Test>::AlreadyClaimed.with_weight(err_weight) + ); + + // for eras with no exposure, pending rewards is false. + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_one)); + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_two)); + assert!(!EraInfo::<Test>::pending_rewards(0, &validator_three)); + }); +} + mod staking_interface { use frame_support::storage::with_storage_layer; use sp_staking::StakingInterface; From 954150f3b5fdb7d07d1ed01b138e2025245bb227 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Sun, 28 Apr 2024 16:29:21 +0100 Subject: [PATCH 72/74] remove unnecessary use statements due to 2021 core prelude (#4183) Some traits are already included in the 2021 prelude and so no longer need to be imported explicitly: the `use` statements for `convert::TryFrom`, `convert::TryInto`, and `iter::FromIterator` are removed. (https://doc.rust-lang.org/core/prelude/rust_2021/) No breaking changes or change of functionality, so I think no PR doc is needed in this case.
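For illustration, a tiny sketch (not part of this change) that compiles on edition 2021 with none of the removed imports, because `TryFrom`, `TryInto` and `FromIterator` are already in scope via the `rust_2021` prelude:

```rs
use std::collections::BTreeSet;

fn main() {
    // `TryFrom`/`TryInto` resolve through the rust_2021 prelude.
    assert!(u8::try_from(300u16).is_err()); // 300 does not fit in a u8.
    let small: u8 = 42u16.try_into().unwrap();

    // So does `FromIterator`, used here via `BTreeSet::from_iter`.
    let set = BTreeSet::from_iter([small, 1, 2]);
    assert!(set.contains(&42));
}
```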
(Motivation: Removes some references to `sp-std`) --- bridges/bin/runtime-common/src/messages.rs | 2 +- bridges/modules/grandpa/src/lib.rs | 2 +- bridges/primitives/runtime/src/chain.rs | 2 +- bridges/primitives/runtime/src/lib.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_lane.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_metrics.rs | 2 +- bridges/relays/lib-substrate-relay/src/messages_target.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/envelope.rs | 2 +- bridges/snowbridge/pallets/inbound-queue/src/lib.rs | 2 +- bridges/snowbridge/primitives/beacon/src/bits.rs | 2 +- bridges/snowbridge/primitives/beacon/src/serde_utils.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/header.rs | 2 +- bridges/snowbridge/primitives/ethereum/src/mpt.rs | 2 +- bridges/snowbridge/runtime/test-common/Cargo.toml | 2 +- cumulus/client/consensus/aura/src/collator.rs | 2 +- cumulus/client/consensus/aura/src/collators/basic.rs | 2 +- cumulus/client/consensus/aura/src/collators/lookahead.rs | 2 +- cumulus/client/network/src/lib.rs | 2 +- cumulus/parachains/pallets/collective-content/Cargo.toml | 2 +- polkadot/node/core/bitfield-signing/src/lib.rs | 2 +- polkadot/node/network/bitfield-distribution/src/tests.rs | 2 +- .../node/network/collator-protocol/src/collator_side/mod.rs | 1 - .../network/collator-protocol/src/validator_side/mod.rs | 2 -- .../network/statement-distribution/src/legacy_v1/tests.rs | 2 +- .../subsystem-types/src/messages/network_bridge_event.rs | 2 +- polkadot/xcm/src/v3/junction.rs | 1 - polkadot/xcm/src/v3/junctions.rs | 2 +- polkadot/xcm/src/v3/mod.rs | 6 +----- polkadot/xcm/src/v3/multiasset.rs | 5 +---- polkadot/xcm/src/v3/multilocation.rs | 6 +----- polkadot/xcm/src/v4/asset.rs | 5 +---- polkadot/xcm/src/v4/junction.rs | 1 - polkadot/xcm/src/v4/junctions.rs | 2 +- polkadot/xcm/src/v4/location.rs | 6 +----- polkadot/xcm/src/v4/mod.rs | 6 +----- polkadot/xcm/xcm-builder/src/tests/mod.rs | 1 - substrate/client/consensus/grandpa/rpc/src/lib.rs | 2 +- substrate/client/consensus/grandpa/src/environment.rs | 1 - substrate/client/mixnet/Cargo.toml | 2 +- .../network/src/protocol/notifications/upgrade/collec.rs | 1 - substrate/frame/Cargo.toml | 2 +- substrate/frame/alliance/src/benchmarking.rs | 6 +----- substrate/frame/alliance/src/lib.rs | 2 +- substrate/frame/alliance/src/mock.rs | 1 - substrate/frame/alliance/src/types.rs | 2 +- substrate/frame/examples/frame-crate/Cargo.toml | 2 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/node-authorization/src/lib.rs | 2 +- substrate/frame/safe-mode/src/lib.rs | 1 - substrate/frame/sassafras/Cargo.toml | 2 +- substrate/frame/transaction-payment/rpc/src/lib.rs | 2 +- substrate/frame/tx-pause/src/lib.rs | 2 +- substrate/primitives/consensus/sassafras/Cargo.toml | 2 +- substrate/primitives/core/fuzz/Cargo.toml | 1 + substrate/primitives/mixnet/Cargo.toml | 2 +- substrate/primitives/state-machine/src/basic.rs | 1 - 56 files changed, 46 insertions(+), 82 deletions(-) diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs index 4aca53f3b983..0fe9935dbdb6 100644 --- a/bridges/bin/runtime-common/src/messages.rs +++ b/bridges/bin/runtime-common/src/messages.rs @@ -35,7 +35,7 @@ use frame_support::{traits::Get, weights::Weight}; use hash_db::Hasher; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, marker::PhantomData, vec::Vec}; +use sp_std::{marker::PhantomData, vec::Vec}; /// Bidirectional message bridge. 
pub trait MessageBridge { diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs index cb536eb07ff6..efcbfb1654b3 100644 --- a/bridges/modules/grandpa/src/lib.rs +++ b/bridges/modules/grandpa/src/lib.rs @@ -49,7 +49,7 @@ use sp_runtime::{ traits::{Header as HeaderT, Zero}, SaturatedConversion, }; -use sp_std::{boxed::Box, convert::TryInto, prelude::*}; +use sp_std::{boxed::Box, prelude::*}; mod call_ext; #[cfg(test)] diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index 1b1c623104f9..369386e41b0c 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, FixedPointOperand, }; -use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; +use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; /// Chain call, that is either SCALE-encoded, or decoded. #[derive(Debug, Clone, PartialEq)] diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index c9c5c9412913..5daba0351ad4 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -31,7 +31,7 @@ use sp_runtime::{ traits::{BadOrigin, Header as HeaderT, UniqueSaturatedInto}, RuntimeDebug, }; -use sp_std::{convert::TryFrom, fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; +use sp_std::{fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; pub use chain::{ AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index abeab8c1402d..58e9ded312df 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -46,7 +46,7 @@ use relay_utils::{ }; use sp_core::Pair; use sp_runtime::traits::Zero; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Substrate -> Substrate messages synchronization pipeline. pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs index 27bf6186c3ba..b30e75bd8bac 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs @@ -32,7 +32,7 @@ use relay_substrate_client::{ use relay_utils::metrics::{MetricsParams, StandaloneMetric}; use sp_core::storage::StorageData; use sp_runtime::{FixedPointNumber, FixedU128}; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; +use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 9396e785530d..633b11f0b802 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -45,7 +45,7 @@ use relay_substrate_client::{ }; use relay_utils::relay_loop::Client as RelayClient; use sp_core::Pair; -use std::{convert::TryFrom, ops::RangeInclusive}; +use std::ops::RangeInclusive; /// Message receiving proof returned by the target Substrate node. 
pub type SubstrateMessagesDeliveryProof = diff --git a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs index 826d535c2cb9..31a8992442d8 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/envelope.rs @@ -3,7 +3,7 @@ use snowbridge_core::{inbound::Log, ChannelId}; use sp_core::{RuntimeDebug, H160, H256}; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; use alloy_primitives::B256; use alloy_sol_types::{sol, SolEvent}; diff --git a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs index 8acbb0c2916e..4a1486204eb0 100644 --- a/bridges/snowbridge/pallets/inbound-queue/src/lib.rs +++ b/bridges/snowbridge/pallets/inbound-queue/src/lib.rs @@ -50,7 +50,7 @@ use frame_system::ensure_signed; use scale_info::TypeInfo; use sp_core::{H160, H256}; use sp_runtime::traits::Zero; -use sp_std::{convert::TryFrom, vec}; +use sp_std::vec; use xcm::prelude::{ send_xcm, Instruction::SetTopic, Junction::*, Location, SendError as XcmpSendError, SendXcm, Xcm, XcmContext, XcmHash, diff --git a/bridges/snowbridge/primitives/beacon/src/bits.rs b/bridges/snowbridge/primitives/beacon/src/bits.rs index 72b7135ee293..fb03588cf8b7 100644 --- a/bridges/snowbridge/primitives/beacon/src/bits.rs +++ b/bridges/snowbridge/primitives/beacon/src/bits.rs @@ -1,6 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 2023 Snowfork -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use ssz_rs::{Bitvector, Deserialize}; pub fn decompress_sync_committee_bits< diff --git a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs index 07f5cbe724ed..5e39ff912257 100644 --- a/bridges/snowbridge/primitives/beacon/src/serde_utils.rs +++ b/bridges/snowbridge/primitives/beacon/src/serde_utils.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer}; // helper to deserialize arbitrary arrays like [T; N] pub mod arrays { - use std::{convert::TryInto, marker::PhantomData}; + use std::marker::PhantomData; use serde::{ de::{SeqAccess, Visitor}, diff --git a/bridges/snowbridge/primitives/ethereum/src/header.rs b/bridges/snowbridge/primitives/ethereum/src/header.rs index f0b51f8c79de..48fa179fe4fa 100644 --- a/bridges/snowbridge/primitives/ethereum/src/header.rs +++ b/bridges/snowbridge/primitives/ethereum/src/header.rs @@ -8,7 +8,7 @@ use rlp::RlpStream; use scale_info::TypeInfo; use sp_io::hashing::keccak_256; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; diff --git a/bridges/snowbridge/primitives/ethereum/src/mpt.rs b/bridges/snowbridge/primitives/ethereum/src/mpt.rs index 9a2dae486dcc..0365f5e994fe 100644 --- a/bridges/snowbridge/primitives/ethereum/src/mpt.rs +++ b/bridges/snowbridge/primitives/ethereum/src/mpt.rs @@ -3,7 +3,7 @@ //! 
Helper types to work with Ethereum's Merkle Patricia Trie nodes use ethereum_types::H256; -use sp_std::{convert::TryFrom, prelude::*}; +use sp_std::prelude::*; pub trait Node { fn contains_hash(&self, hash: H256) -> bool; diff --git a/bridges/snowbridge/runtime/test-common/Cargo.toml b/bridges/snowbridge/runtime/test-common/Cargo.toml index 92970339fac0..7cbb38574034 100644 --- a/bridges/snowbridge/runtime/test-common/Cargo.toml +++ b/bridges/snowbridge/runtime/test-common/Cargo.toml @@ -3,7 +3,7 @@ name = "snowbridge-runtime-test-common" description = "Snowbridge Runtime Tests" version = "0.2.0" authors = ["Snowfork "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" categories = ["cryptography::cryptocurrencies"] diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 5b7669c88f47..776052215d93 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -55,7 +55,7 @@ use sp_runtime::{ }; use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, error::Error, time::Duration}; +use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. pub struct Params { diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index a4c22a45266c..1047c6219ad1 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -48,7 +48,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_state_machine::Backend as _; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator as collator_util; diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 3fe87e94b7b9..09416233ea9b 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -67,7 +67,7 @@ use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member}; use sp_timestamp::Timestamp; -use std::{convert::TryFrom, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use crate::collator::{self as collator_util, SlotClaim}; diff --git a/cumulus/client/network/src/lib.rs b/cumulus/client/network/src/lib.rs index ebd557b805c5..f442ed5840bd 100644 --- a/cumulus/client/network/src/lib.rs +++ b/cumulus/client/network/src/lib.rs @@ -36,7 +36,7 @@ use polkadot_primitives::{ use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, future::FutureExt, Future}; -use std::{convert::TryFrom, fmt, marker::PhantomData, pin::Pin, sync::Arc}; +use std::{fmt, marker::PhantomData, pin::Pin, sync::Arc}; #[cfg(test)] mod tests; diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index b3fac47cb4ae..207259bee52c 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-collective-content" version = "0.6.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true description = "Managed content" license = "Apache-2.0" diff --git 
a/polkadot/node/core/bitfield-signing/src/lib.rs b/polkadot/node/core/bitfield-signing/src/lib.rs index 0fc0bb3d2788..89851c4a033b 100644 --- a/polkadot/node/core/bitfield-signing/src/lib.rs +++ b/polkadot/node/core/bitfield-signing/src/lib.rs @@ -38,7 +38,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{self as util, Validator}; use polkadot_primitives::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; use sp_keystore::{Error as KeystoreError, KeystorePtr}; -use std::{collections::HashMap, iter::FromIterator, time::Duration}; +use std::{collections::HashMap, time::Duration}; use wasm_timer::{Delay, Instant}; mod metrics; diff --git a/polkadot/node/network/bitfield-distribution/src/tests.rs b/polkadot/node/network/bitfield-distribution/src/tests.rs index 188b51ebccca..dc37f73ec8a1 100644 --- a/polkadot/node/network/bitfield-distribution/src/tests.rs +++ b/polkadot/node/network/bitfield-distribution/src/tests.rs @@ -40,7 +40,7 @@ use sp_core::Pair as PairT; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; const TIMEOUT: Duration = Duration::from_millis(50); macro_rules! launch { diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index e6aa55235b7a..879caf923285 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -16,7 +16,6 @@ use std::{ collections::{HashMap, HashSet}, - convert::TryInto, time::Duration, }; diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index f7b07133bff4..ac8c060827f5 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -20,9 +20,7 @@ use futures::{ use futures_timer::Delay; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - convert::TryInto, future::Future, - iter::FromIterator, time::{Duration, Instant}, }; use tokio_util::sync::CancellationToken; diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs index 0dea5ad0996e..d4c5f95034ae 100644 --- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -55,7 +55,7 @@ use sp_application_crypto::{sr25519::Pair, AppCrypto, Pair as TraitPair}; use sp_authority_discovery::AuthorityPair; use sp_keyring::Sr25519Keyring; use sp_keystore::{Keystore, KeystorePtr}; -use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use util::reputation::add_reputation; // Some deterministic genesis hash for protocol names diff --git a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs index fa2c7687b38a..29798c785b9c 100644 --- a/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/polkadot/node/subsystem-types/src/messages/network_bridge_event.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; pub use sc_network::ReputationChange; pub use sc_network_types::PeerId; diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index e9e51941b1ac..32ce352c5c02 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -26,7 +26,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::{TryFrom, TryInto}; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v3/junctions.rs b/polkadot/xcm/src/v3/junctions.rs index 9748e81fa55f..7b014304fdaf 100644 --- a/polkadot/xcm/src/v3/junctions.rs +++ b/polkadot/xcm/src/v3/junctions.rs @@ -17,7 +17,7 @@ //! XCM `Junctions`/`InteriorMultiLocation` datatype. use super::{Junction, MultiLocation, NetworkId}; -use core::{convert::TryFrom, mem, result}; +use core::{mem, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index d4e2da07a25a..e7c57f414eb7 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -29,11 +29,7 @@ use super::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 0662077b19d0..9a67b0e4986c 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -42,10 +42,7 @@ use crate::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 18fe01ec8fa7..731e277b29d8 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -20,10 +20,7 @@ use super::{Junction, Junctions}; use crate::{ v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, }; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -766,7 +763,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v2; - use core::convert::TryInto; fn takes_multilocation>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 8abd8f9f8fd0..6b6d200f32fe 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -34,10 +34,7 @@ use crate::v3::{ }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; -use core::{ - cmp::Ordering, - convert::{TryFrom, TryInto}, -}; +use core::cmp::Ordering; use parity_scale_codec::{self as codec, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index b5d10484aa02..3ae97de5e9b8 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ 
b/polkadot/xcm/src/v4/junction.rs @@ -23,7 +23,6 @@ use crate::{ VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; -use core::convert::TryFrom; use parity_scale_codec::{self, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; diff --git a/polkadot/xcm/src/v4/junctions.rs b/polkadot/xcm/src/v4/junctions.rs index 48712dd74c6c..6d1af59e13dc 100644 --- a/polkadot/xcm/src/v4/junctions.rs +++ b/polkadot/xcm/src/v4/junctions.rs @@ -18,7 +18,7 @@ use super::{Junction, Location, NetworkId}; use alloc::sync::Arc; -use core::{convert::TryFrom, mem, ops::Range, result}; +use core::{mem, ops::Range, result}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index 9275bfdb9492..cee76b689407 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -18,10 +18,7 @@ use super::{traits::Reanchorable, Junction, Junctions}; use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; -use core::{ - convert::{TryFrom, TryInto}, - result, -}; +use core::result; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -723,7 +720,6 @@ mod tests { #[test] fn conversion_from_other_types_works() { use crate::v3; - use core::convert::TryInto; fn takes_location>(_arg: Arg) {} diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index 30ee485589a2..77b6d915fcb5 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -24,11 +24,7 @@ use super::v3::{ use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; use bounded_collections::{parameter_types, BoundedVec}; -use core::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - result, -}; +use core::{fmt::Debug, result}; use derivative::Derivative; use parity_scale_codec::{ self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, diff --git a/polkadot/xcm/xcm-builder/src/tests/mod.rs b/polkadot/xcm/xcm-builder/src/tests/mod.rs index 63d254a10675..16ce3d2cf8ff 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mod.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mod.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . 
use super::{test_utils::*, *}; -use core::convert::TryInto; use frame_support::{ assert_err, traits::{ConstU32, ContainsPair, ProcessMessageError}, diff --git a/substrate/client/consensus/grandpa/rpc/src/lib.rs b/substrate/client/consensus/grandpa/rpc/src/lib.rs index 0557eab93e29..68de068c3058 100644 --- a/substrate/client/consensus/grandpa/rpc/src/lib.rs +++ b/substrate/client/consensus/grandpa/rpc/src/lib.rs @@ -125,7 +125,7 @@ where #[cfg(test)] mod tests { use super::*; - use std::{collections::HashSet, convert::TryInto, sync::Arc}; + use std::{collections::HashSet, sync::Arc}; use jsonrpsee::{core::EmptyServerParams as EmptyParams, types::SubscriptionId, RpcModule}; use parity_scale_codec::{Decode, Encode}; diff --git a/substrate/client/consensus/grandpa/src/environment.rs b/substrate/client/consensus/grandpa/src/environment.rs index d3e2beb84e79..31df038044a4 100644 --- a/substrate/client/consensus/grandpa/src/environment.rs +++ b/substrate/client/consensus/grandpa/src/environment.rs @@ -18,7 +18,6 @@ use std::{ collections::{BTreeMap, HashMap}, - iter::FromIterator, marker::PhantomData, pin::Pin, sync::Arc, diff --git a/substrate/client/mixnet/Cargo.toml b/substrate/client/mixnet/Cargo.toml index 65b81bda4b08..e605a06c9d9c 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sc-mixnet" version = "0.4.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs index 791821b3f75d..33c090ae50e9 100644 --- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs +++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs @@ -19,7 +19,6 @@ use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; use std::{ - iter::FromIterator, pin::Pin, task::{Context, Poll}, vec, diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 84bab86581ca..ef8d8758f3df 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -2,7 +2,7 @@ name = "polkadot-sdk-frame" version = "0.1.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true diff --git a/substrate/frame/alliance/src/benchmarking.rs b/substrate/frame/alliance/src/benchmarking.rs index 710c32a848dd..09e2045555b6 100644 --- a/substrate/frame/alliance/src/benchmarking.rs +++ b/substrate/frame/alliance/src/benchmarking.rs @@ -19,11 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use core::{ - cmp, - convert::{TryFrom, TryInto}, - mem::size_of, -}; +use core::{cmp, mem::size_of}; use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use frame_benchmarking::{account, v2::*, BenchmarkError}; diff --git a/substrate/frame/alliance/src/lib.rs b/substrate/frame/alliance/src/lib.rs index 1f06241e9c83..ed771c7226ea 100644 --- a/substrate/frame/alliance/src/lib.rs +++ b/substrate/frame/alliance/src/lib.rs @@ -101,7 +101,7 @@ use sp_runtime::{ traits::{Dispatchable, Saturating, StaticLookup, Zero}, DispatchError, RuntimeDebug, }; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; use frame_support::{ dispatch::{DispatchResult, 
DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, diff --git a/substrate/frame/alliance/src/mock.rs b/substrate/frame/alliance/src/mock.rs index b183e412bed7..7116e69efa17 100644 --- a/substrate/frame/alliance/src/mock.rs +++ b/substrate/frame/alliance/src/mock.rs @@ -17,7 +17,6 @@ //! Test utilities -use core::convert::{TryFrom, TryInto}; pub use sp_core::H256; use sp_runtime::traits::Hash; pub use sp_runtime::{ diff --git a/substrate/frame/alliance/src/types.rs b/substrate/frame/alliance/src/types.rs index 784993b2bc13..149030b52c67 100644 --- a/substrate/frame/alliance/src/types.rs +++ b/substrate/frame/alliance/src/types.rs @@ -19,7 +19,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{traits::ConstU32, BoundedVec}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; /// A Multihash instance that only supports the basic functionality and no hashing. #[derive( diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 3a0e4f720f95..48cb25f90949 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-example-frame-crate" version = "0.0.1" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 6a4ef5c29ac8..964d6acb889a 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "pallet-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 9019a863ad81..a7967536079f 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -47,7 +47,7 @@ pub mod weights; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; use sp_runtime::traits::StaticLookup; -use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; +use sp_std::{collections::btree_set::BTreeSet, prelude::*}; pub use weights::WeightInfo; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index 2bf2ebee0a4a..4be0776d6c1f 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -79,7 +79,6 @@ pub mod mock; mod tests; pub mod weights; -use core::convert::TryInto; use frame_support::{ defensive_assert, pallet_prelude::*, diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 09977142efc8..888b1d8f31fc 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -2,7 +2,7 @@ name = "pallet-sassafras" version = "0.3.5-dev" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" diff --git a/substrate/frame/transaction-payment/rpc/src/lib.rs b/substrate/frame/transaction-payment/rpc/src/lib.rs index f5323cf852e9..050c7fb8915e 100644 --- 
a/substrate/frame/transaction-payment/rpc/src/lib.rs +++ b/substrate/frame/transaction-payment/rpc/src/lib.rs @@ -17,7 +17,7 @@ //! RPC interface for the transaction payment pallet. -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; use codec::{Codec, Decode}; use jsonrpsee::{ diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index 31be575fba7c..5904b5ed3162 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -87,7 +87,7 @@ use frame_support::{ }; use frame_system::pallet_prelude::*; use sp_runtime::{traits::Dispatchable, DispatchResult}; -use sp_std::{convert::TryInto, prelude::*}; +use sp_std::prelude::*; pub use pallet::*; pub use weights::*; diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 07304ed9b240..50348054da01 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -3,7 +3,7 @@ name = "sp-consensus-sassafras" version = "0.3.4-dev" authors.workspace = true description = "Primitives for Sassafras consensus" -edition = "2021" +edition.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/polkadot-sdk/" diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml index c6b5a065b6dc..463eaea8ea30 100644 --- a/substrate/primitives/core/fuzz/Cargo.toml +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -2,6 +2,7 @@ name = "sp-core-fuzz" version = "0.0.0" publish = false +edition.workspace = true [lints] workspace = true diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index 07840ca63cb2..166609ad922c 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -4,7 +4,7 @@ name = "sp-mixnet" version = "0.4.0" license = "Apache-2.0" authors = ["Parity Technologies "] -edition = "2021" +edition.workspace = true homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index ace88aee2628..8b6f746eaba0 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -33,7 +33,6 @@ use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, - iter::FromIterator, }; /// Simple Map-based Externalities impl. From 92a348f57deed44789511df73d3fbbbcb58d98cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Apr 2024 20:36:25 +0400 Subject: [PATCH 73/74] Bump snow from 0.9.3 to 0.9.6 (#4061) Bumps [snow](https://github.com/mcginty/snow) from 0.9.3 to 0.9.6.

Release notes

Sourced from snow's releases.

v0.9.6

  • Validate invalid PSK positions when building a Noise protocol.
  • Raise errors in various typos/mistakes in Noise patterns when parsing.
  • Deprecate the sodiumoxide backend, as that crate is no longer maintained. We may eventually migrate it to a maintained version of the crate, but for now it's best to warn users.
  • Set a hard limit in read_message() in transport mode to 65535 to be fully compliant with the Noise specification (see the sketch below).

Full Changelog: https://github.com/mcginty/snow/compare/v0.9.5...v0.9.6
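As a rough usage sketch of where that `read_message()` limit applies (illustrative only, assuming a dependency on `snow = "0.9"`; not taken from this PR):

```rs
use snow::Builder;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Noise_NN needs no static keys, which keeps the sketch short.
    let mut initiator =
        Builder::new("Noise_NN_25519_ChaChaPoly_BLAKE2s".parse()?).build_initiator()?;
    let mut responder =
        Builder::new("Noise_NN_25519_ChaChaPoly_BLAKE2s".parse()?).build_responder()?;
    let (mut a, mut b) = ([0u8; 1024], [0u8; 1024]);

    // -> e
    let n = initiator.write_message(&[], &mut a)?;
    responder.read_message(&a[..n], &mut b)?;
    // <- e, ee
    let n = responder.write_message(&[], &mut b)?;
    initiator.read_message(&b[..n], &mut a)?;

    // In transport mode every Noise message is capped at 65535 bytes;
    // v0.9.6 enforces that cap in `read_message` as well.
    let mut tx = initiator.into_transport_mode()?;
    let mut rx = responder.into_transport_mode()?;
    let n = tx.write_message(b"hello", &mut a)?;
    let n = rx.read_message(&a[..n], &mut b)?;
    assert_eq!(&b[..n], b"hello");
    Ok(())
}
```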

v0.9.5

This is a security release that fixes a logic flaw in decryption in TransportState (i.e. the stateful one), where the nonce could increase even when decryption failed. This can cause a desync between the sender and receiver, opening up a denial-of-service vector if the attacker has the ability to inject packets into the channel Noise is talking over (see the sketch below).

More details can be found in the advisory: https://github.com/mcginty/snow/security/advisories/GHSA-7g9j-g5jg-3vv3

All users are encouraged to update.
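To make the flaw concrete, here is a hypothetical sketch (illustrative types only, not snow's actual internals) of why advancing the nonce on a failed decrypt desyncs the channel:

```rs
// Hypothetical receiver; the field and method names are made up for illustration.
struct Receiver {
    nonce: u64,
}

impl Receiver {
    fn read_message(&mut self, authentic: bool) -> Result<u64, ()> {
        if !authentic {
            // The v0.9.5 fix: do NOT advance `self.nonce` on failure. If it
            // were advanced here, the receiver would step past the sender's
            // nonce, every later genuine message would fail to decrypt, and
            // one injected packet would become a denial of service.
            return Err(());
        }
        let n = self.nonce;
        self.nonce += 1;
        Ok(n)
    }
}

fn main() {
    let mut rx = Receiver { nonce: 0 };
    assert!(rx.read_message(false).is_err()); // attacker-injected garbage
    assert_eq!(rx.read_message(true), Ok(0)); // genuine traffic still decrypts
}
```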

v0.9.4

This is a dependency version bump release because a couple of important dependencies released new versions that needed a Cargo.toml bump:

  • ring 0.17
  • pqcrypto-kyber 0.8
  • aes-gcm 0.10
  • chacha20poly1305 0.10
Commits
  • a4be73f meta: v0.9.6 release
  • 9e53dcf TransportState: limit read_message size to 65535
  • faf0560 Deprecate sodiumoxide resolver
  • 308a24d Add warnings about multiple calls to same method in Builder
  • f280991 Error when extraneous parameters are included in string to parse
  • dbdcc48 Error on duplicate modifiers in parameter string
  • 8b1a819 Validate PSK index in pattern to avoid panic
  • 74e30cf meta: v0.9.5 release
  • 12e8ae5 Stateful nonce desync fix
  • 02c26b7 Remove clap from simple example
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 153 ++++++++--------------------------------------------- 1 file changed, 23 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d64800fb085e..67b0ad4def24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,15 +42,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.5.2" @@ -61,18 +52,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -84,31 +63,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle 2.5.0", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", + "aead", + "aes", "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "ctr", + "ghash", "subtle 2.5.0", ] @@ -2540,18 +2505,6 @@ dependencies = [ "keystream", ] -[[package]] -name = "chacha20" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "zeroize", -] - [[package]] name = "chacha20" version = "0.9.1" @@ -2565,14 +2518,14 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", - "chacha20 0.8.2", - "cipher 0.3.0", - "poly1305 0.7.2", + "aead", + "chacha20", + "cipher 0.4.4", + "poly1305", "zeroize", ] @@ -2652,15 +2605,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -2669,6 +2613,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -3676,15 +3621,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" @@ -6395,16 +6331,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.0" @@ -6412,7 +6338,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug 0.3.0", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -12032,7 +11958,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", "rand 0.8.5", - "rand_core 0.6.4", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -14568,17 +14494,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "poly1305" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" -dependencies = [ - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", -] - [[package]] name = "poly1305" version = "0.8.0" @@ -14587,19 +14502,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -14611,7 +14514,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -17890,7 +17793,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead 0.5.2", + "aead", "arrayref", "arrayvec 0.7.4", "curve25519-dalek 4.1.2", @@ -18542,7 +18445,7 @@ dependencies = [ "bip39", "blake2-rfc", "bs58 0.5.0", - "chacha20 0.9.1", + "chacha20", "crossbeam-queue", "derive_more", "ed25519-zebra 4.0.3", @@ -18564,7 +18467,7 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "poly1305 0.8.0", + "poly1305", "rand 0.8.5", "rand_chacha 0.3.1", "ruzstd", @@ -18627,16 +18530,16 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm", "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek 4.1.2", "rand_core 0.6.4", - "ring 0.16.20", + "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.7", "subtle 2.5.0", @@ -19900,7 +19803,7 @@ dependencies = [ name = "sp-statement-store" version = "10.0.0" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "curve25519-dalek 4.1.2", "ed25519-dalek 2.1.0", "hkdf", @@ -22142,16 +22045,6 @@ version = "0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.5.0", -] - [[package]] name = "universal-hash" version = "0.5.1" From f34d8e3cf033e2a22a41b505c437972a5dc83d78 Mon Sep 17 00:00:00 2001 From: Tin Chung <56880684+chungquantin@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:13:01 +0700 Subject: [PATCH 74/74] Remove hard-coded indices from pallet-xcm tests (#4248) # ISSUE - Link to issue: https://github.com/paritytech/polkadot-sdk/issues/4237 # DESCRIPTION Remove all ModuleError with hard-coded indices to pallet Error. For example: ```rs Err(DispatchError::Module(ModuleError { index: 4, error: [2, 0, 0, 0], message: Some("Filtered") })) ``` To ```rs let expected_result = Err(crate::Error::::Filtered.into()); assert_eq!(result, expected_result); ``` # TEST OUTCOME ``` test result: ok. 74 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.02s ``` --------- Co-authored-by: Oliver Tale-Yazdi --- .../pallet-xcm/src/tests/assets_transfer.rs | 218 ++++-------------- polkadot/xcm/pallet-xcm/src/tests/mod.rs | 6 +- 2 files changed, 40 insertions(+), 184 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index 7dc05c1cc70e..f42e220d6932 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -22,12 +22,12 @@ use crate::{ DispatchResult, OriginFor, }; use frame_support::{ - assert_ok, + assert_err, assert_ok, traits::{tokens::fungibles::Inspect, Currency}, weights::Weight, }; use polkadot_parachain_primitives::primitives::Id as ParaId; -use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use sp_runtime::traits::AccountIdConversion; use xcm::prelude::*; use xcm_executor::traits::ConvertLocation; @@ -112,14 +112,8 @@ fn limited_teleport_filtered_assets_disallowed() { 0, Unlimited, ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + let expected_result = Err(crate::Error::::Filtered.into()); + assert_eq!(result, expected_result); }); } @@ -365,11 +359,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works( /// Test `limited_teleport_assets` with local asset reserve and local fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -527,11 +517,7 @@ fn transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() /// disallowed. 
#[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -542,11 +528,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_ /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); destination_asset_reserve_and_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -633,11 +615,7 @@ fn remote_asset_reserve_and_local_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -648,11 +626,7 @@ fn transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -662,11 +636,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disal /// Test `limited_teleport_assets` with remote asset reserve and local fee reserve is disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); remote_asset_reserve_and_local_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -745,7 +715,7 @@ fn local_asset_reserve_and_destination_fee_reserve_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } let weight = BaseXcmWeight::get() * 3; @@ -821,11 +791,7 @@ fn transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() /// disallowed.
#[test] fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -835,11 +801,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_ /// Test `limited_teleport_assets` with local asset reserve and destination fee reserve disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); local_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -993,11 +955,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_re /// disallowed. #[test] fn teleport_assets_with_destination_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); destination_asset_reserve_and_destination_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1102,11 +1060,7 @@ fn remote_asset_reserve_and_destination_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and destination fee reserve is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1117,11 +1071,7 @@ fn transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallo /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1132,11 +1082,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve /// disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); remote_asset_reserve_and_destination_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1222,11 +1168,7 @@ fn local_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with local asset reserve and remote fee reserve is disallowed.
#[test] fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1237,11 +1179,7 @@ fn transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() /// disallowed. #[test] fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1251,11 +1189,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disal /// Test `limited_teleport_assets` with local asset reserve and remote fee reserve is disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::<Test>::Filtered.into()); local_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1366,11 +1300,7 @@ fn destination_asset_reserve_and_remote_fee_reserve_call_disallowed( /// Test `transfer_assets` with destination asset reserve and remote fee reserve is disallowed. #[test] fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::<Test>::InvalidAssetUnsupportedReserve.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -1381,11 +1311,7 @@ fn transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallo /// disallowed. #[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::<Test>::TooManyReserves.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1396,11 +1322,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve /// disallowed.
#[test] fn teleport_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_remote_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -1485,7 +1407,7 @@ fn remote_asset_reserve_and_remote_fee_reserve_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } assert!(matches!( @@ -1558,11 +1480,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_work /// disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_remote_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_remote_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1702,11 +1620,7 @@ fn transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { /// Test `limited_reserve_transfer_assets` with local asset reserve and teleported fee disallowed. #[test] fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); local_asset_reserve_and_teleported_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1716,11 +1630,7 @@ fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_disallowe /// Test `limited_teleport_assets` with local asset reserve and teleported fee disallowed. #[test] fn teleport_assets_with_local_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); local_asset_reserve_and_teleported_fee_call( XcmPallet::limited_teleport_assets, expected_result, @@ -1802,7 +1712,7 @@ fn destination_asset_reserve_and_teleported_fee_call( assert_eq!(result, expected_result); if expected_result.is_err() { // short-circuit here for tests where we expect failure - return + return; } let weight = BaseXcmWeight::get() * 4; @@ -1891,11 +1801,7 @@ fn transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { /// disallowed. #[test] fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); destination_asset_reserve_and_teleported_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -1905,11 +1811,7 @@ fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_dis /// Test `limited_teleport_assets` with destination asset reserve and teleported fee disallowed. 
#[test] fn teleport_assets_with_destination_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); destination_asset_reserve_and_teleported_fee_call( XcmPallet::limited_teleport_assets, expected_result, @@ -2013,11 +1915,7 @@ fn remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( /// Test `transfer_assets` with remote asset reserve and teleported fee is disallowed. #[test] fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [22, 0, 0, 0], - message: Some("InvalidAssetUnsupportedReserve"), - })); + let expected_result = Err(crate::Error::::InvalidAssetUnsupportedReserve.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::transfer_assets, expected_result, @@ -2028,11 +1926,7 @@ fn transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { /// disallowed. #[test] fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [23, 0, 0, 0], - message: Some("TooManyReserves"), - })); + let expected_result = Err(crate::Error::::TooManyReserves.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2042,11 +1936,7 @@ fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallow /// Test `limited_teleport_assets` with remote asset reserve and teleported fee is disallowed. #[test] fn teleport_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); remote_asset_reserve_and_teleported_fee_reserve_call_disallowed( XcmPallet::limited_teleport_assets, expected_result, @@ -2088,14 +1978,7 @@ fn reserve_transfer_assets_with_teleportable_asset_disallowed() { fee_index as u32, Unlimited, ); - assert_eq!( - res, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + assert_err!(res, crate::Error::::Filtered); // Alice native asset is still same assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); // Alice USDT balance is still same @@ -2136,14 +2019,7 @@ fn transfer_assets_with_filtered_teleported_fee_disallowed() { fee_index as u32, Unlimited, ); - assert_eq!( - result, - Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered") - })) - ); + assert_err!(result, crate::Error::::Filtered); }); } @@ -2350,11 +2226,7 @@ fn transfer_assets_with_teleportable_asset_and_local_fee_reserve_works() { /// Test `limited_reserve_transfer_assets` with teleportable asset and local fee reserve disallowed. 
#[test] fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleport_asset_using_local_fee_reserve_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2364,11 +2236,7 @@ fn reserve_transfer_assets_with_teleportable_asset_and_local_fee_reserve_disallo /// Test `limited_teleport_assets` with teleportable asset and local fee reserve disallowed. #[test] fn teleport_assets_with_teleportable_asset_and_local_fee_reserve_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleport_asset_using_local_fee_reserve_call( XcmPallet::limited_teleport_assets, expected_result, @@ -2541,11 +2409,7 @@ fn transfer_teleported_assets_using_destination_reserve_fee_works() { /// disallowed. #[test] fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleported_asset_using_destination_reserve_fee_call( XcmPallet::limited_reserve_transfer_assets, expected_result, @@ -2555,11 +2419,7 @@ fn reserve_transfer_teleported_assets_using_destination_reserve_fee_disallowed() /// Test `limited_teleport_assets` with teleported asset reserve and destination fee disallowed. #[test] fn teleport_assets_using_destination_reserve_fee_disallowed() { - let expected_result = Err(DispatchError::Module(ModuleError { - index: 4, - error: [2, 0, 0, 0], - message: Some("Filtered"), - })); + let expected_result = Err(crate::Error::::Filtered.into()); teleported_asset_using_destination_reserve_fee_call( XcmPallet::limited_teleport_assets, expected_result, diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 8faf16e0d2a9..782c8bed478e 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -557,11 +557,7 @@ fn incomplete_execute_reverts_side_effects() { ), pays_fee: frame_support::dispatch::Pays::Yes, }, - error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError { - index: 4, - error: [24, 0, 0, 0,], - message: Some("LocalExecutionIncomplete") - }) + error: sp_runtime::DispatchError::from(Error::::LocalExecutionIncomplete) }) ); });
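
Note on the idiom used throughout the hunks above: `#[pallet::error]` generates an
`impl From<Error<T>> for DispatchError`, so `crate::Error::<Test>::Filtered.into()`
compares equal to the `DispatchError::Module(ModuleError { .. })` value the tests
previously spelled out by hand, without repeating the pallet's runtime index or the
variant's encoded bytes at every call site. Likewise, `frame_support::assert_err!(x, e)`
compares against `Err(e.into())`, which is why the two multi-line `assert_eq!` blocks
collapse to one line each. Below is a minimal, self-contained sketch of the pattern in
plain Rust; `DispatchError`, `PalletError`, the index `4`, and the byte encoding are
hand-rolled stand-ins for illustration, not the real sp_runtime/FRAME types:

	// Stand-in for sp_runtime's `DispatchError::Module` shape: the pallet's
	// index in the runtime plus the encoded error value.
	#[derive(Debug, PartialEq)]
	enum DispatchError {
		Module { index: u8, error: [u8; 4] },
	}

	// Stand-in for a `#[pallet::error]` enum; the discriminants mirror the
	// raw bytes the old tests hard-coded ([2, 0, 0, 0] and [23, 0, 0, 0]).
	#[derive(Debug, Clone, Copy)]
	enum PalletError {
		Filtered = 2,
		TooManyReserves = 23,
	}

	// Stand-in for the conversion the macro generates: the single place
	// that knows the pallet index and the variant encoding.
	impl From<PalletError> for DispatchError {
		fn from(e: PalletError) -> Self {
			DispatchError::Module { index: 4, error: [e as u8, 0, 0, 0] }
		}
	}

	fn main() {
		// Old style: index and bytes repeated at every call site.
		let by_hand = DispatchError::Module { index: 4, error: [23, 0, 0, 0] };
		// New style: the typed variant converts to the same value.
		let typed: DispatchError = PalletError::TooManyReserves.into();
		assert_eq!(typed, by_hand);
		assert_eq!(
			DispatchError::from(PalletError::Filtered),
			DispatchError::Module { index: 4, error: [2, 0, 0, 0] },
		);
	}

The practical benefit is robustness: if the pallet is moved in `construct_runtime!` or
an error variant is reordered, the typed form stays correct, whereas the hand-written
index/byte expectations would silently go stale.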